Diffstat (limited to 'tools')
-rw-r--r--  tools/Makefile | 39
-rw-r--r--  tools/accounting/.gitignore | 1
-rw-r--r--  tools/accounting/Makefile | 2
-rw-r--r--  tools/accounting/getdelays.c | 8
-rw-r--r--  tools/accounting/procacct.c | 417
-rw-r--r--  tools/arch/arm64/include/asm/cputype.h | 12
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h | 36
-rw-r--r--  tools/arch/arm64/include/uapi/asm/perf_regs.h | 7
-rw-r--r--  tools/arch/h8300/include/asm/bitsperlong.h | 15
-rw-r--r--  tools/arch/h8300/include/uapi/asm/mman.h | 7
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 21
-rw-r--r--  tools/arch/x86/include/asm/disabled-features.h | 29
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 57
-rw-r--r--  tools/arch/x86/include/uapi/asm/kvm.h | 23
-rw-r--r--  tools/arch/x86/include/uapi/asm/svm.h | 13
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 16
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-feature.rst | 12
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 5
-rw-r--r--  tools/bpf/bpftool/Makefile | 13
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 30
-rw-r--r--  tools/bpf/bpftool/btf.c | 119
-rw-r--r--  tools/bpf/bpftool/btf_dumper.c | 29
-rw-r--r--  tools/bpf/bpftool/cgroup.c | 162
-rw-r--r--  tools/bpf/bpftool/common.c | 154
-rw-r--r--  tools/bpf/bpftool/feature.c | 170
-rw-r--r--  tools/bpf/bpftool/gen.c | 120
-rw-r--r--  tools/bpf/bpftool/link.c | 57
-rw-r--r--  tools/bpf/bpftool/main.c | 4
-rw-r--r--  tools/bpf/bpftool/main.h | 22
-rw-r--r--  tools/bpf/bpftool/map.c | 82
-rw-r--r--  tools/bpf/bpftool/perf.c | 112
-rw-r--r--  tools/bpf/bpftool/prog.c | 77
-rw-r--r--  tools/bpf/bpftool/tracelog.c | 2
-rw-r--r--  tools/bpf/resolve_btfids/main.c | 40
-rw-r--r--  tools/bpf/runqslower/Makefile | 7
-rw-r--r--  tools/bpf/runqslower/runqslower.c | 18
-rw-r--r--  tools/build/Makefile.feature | 4
-rw-r--r--  tools/build/feature/Makefile | 20
-rw-r--r--  tools/build/feature/test-libbpf-bpf_map_create.c | 8
-rw-r--r--  tools/build/feature/test-libbpf-bpf_object__next_map.c | 8
-rw-r--r--  tools/build/feature/test-libbpf-bpf_object__next_program.c | 8
-rw-r--r--  tools/build/feature/test-libbpf-bpf_prog_load.c | 9
-rw-r--r--  tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c | 5
-rw-r--r--  tools/build/feature/test-libbpf-btf__raw_data.c | 8
-rw-r--r--  tools/gpio/gpio-event-mon.c | 6
-rw-r--r--  tools/include/linux/arm-smccc.h | 193
-rw-r--r--  tools/include/linux/bitmap.h | 17
-rw-r--r--  tools/include/linux/btf_ids.h | 35
-rw-r--r--  tools/include/linux/objtool.h | 17
-rw-r--r--  tools/include/linux/sched/mm.h | 2
-rw-r--r--  tools/include/nolibc/Makefile | 37
-rw-r--r--  tools/include/nolibc/stdio.h | 4
-rw-r--r--  tools/include/nolibc/stdlib.h | 7
-rw-r--r--  tools/include/uapi/asm-generic/fcntl.h | 30
-rw-r--r--  tools/include/uapi/asm-generic/socket.h | 2
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  tools/include/uapi/asm/bitsperlong.h | 2
-rw-r--r--  tools/include/uapi/asm/bpf_perf_event.h | 2
-rw-r--r--  tools/include/uapi/drm/i915_drm.h | 359
-rw-r--r--  tools/include/uapi/linux/bpf.h | 224
-rw-r--r--  tools/include/uapi/linux/btf.h | 21
-rw-r--r--  tools/include/uapi/linux/fs.h | 2
-rw-r--r--  tools/include/uapi/linux/if_link.h | 3
-rw-r--r--  tools/include/uapi/linux/if_tun.h | 2
-rw-r--r--  tools/include/uapi/linux/kvm.h | 65
-rw-r--r--  tools/include/uapi/linux/perf_event.h | 2
-rw-r--r--  tools/include/uapi/linux/pkt_cls.h | 4
-rw-r--r--  tools/include/uapi/linux/prctl.h | 9
-rw-r--r--  tools/include/uapi/linux/seg6.h | 4
-rw-r--r--  tools/include/uapi/linux/usbdevice_fs.h | 4
-rw-r--r--  tools/include/uapi/linux/vhost.h | 26
-rw-r--r--  tools/include/uapi/sound/asound.h | 2
-rwxr-xr-x  tools/kvm/kvm_stat/kvm_stat | 3
-rw-r--r--  tools/lib/bitmap.c | 20
-rw-r--r--  tools/lib/bpf/Build | 5
-rw-r--r--  tools/lib/bpf/Makefile | 6
-rw-r--r--  tools/lib/bpf/bpf.c | 345
-rw-r--r--  tools/lib/bpf/bpf.h | 155
-rw-r--r--  tools/lib/bpf/bpf_core_read.h | 48
-rw-r--r--  tools/lib/bpf/bpf_helpers.h | 39
-rw-r--r--  tools/lib/bpf/bpf_tracing.h | 83
-rw-r--r--  tools/lib/bpf/btf.c | 427
-rw-r--r--  tools/lib/bpf/btf.h | 118
-rw-r--r--  tools/lib/bpf/btf_dump.c | 160
-rw-r--r--  tools/lib/bpf/gen_loader.c | 2
-rw-r--r--  tools/lib/bpf/libbpf.c | 3267
-rw-r--r--  tools/lib/bpf/libbpf.h | 828
-rw-r--r--  tools/lib/bpf/libbpf.map | 138
-rw-r--r--  tools/lib/bpf/libbpf_common.h | 16
-rw-r--r--  tools/lib/bpf/libbpf_internal.h | 70
-rw-r--r--  tools/lib/bpf/libbpf_legacy.h | 28
-rw-r--r--  tools/lib/bpf/libbpf_probes.c | 125
-rw-r--r--  tools/lib/bpf/libbpf_version.h | 4
-rw-r--r--  tools/lib/bpf/linker.c | 7
-rw-r--r--  tools/lib/bpf/netlink.c | 62
-rw-r--r--  tools/lib/bpf/relo_core.c | 581
-rw-r--r--  tools/lib/bpf/relo_core.h | 16
-rw-r--r--  tools/lib/bpf/usdt.bpf.h | 247
-rw-r--r--  tools/lib/bpf/usdt.c | 1519
-rw-r--r--  tools/lib/perf/evlist.c | 84
-rw-r--r--  tools/lib/perf/evsel.c | 51
-rw-r--r--  tools/lib/perf/include/internal/evlist.h | 3
-rw-r--r--  tools/lib/perf/include/internal/evsel.h | 11
-rw-r--r--  tools/lib/perf/include/internal/lib.h | 2
-rw-r--r--  tools/lib/perf/include/perf/cpumap.h | 3
-rw-r--r--  tools/lib/perf/include/perf/evsel.h | 1
-rw-r--r--  tools/lib/perf/lib.c | 20
-rw-r--r--  tools/lib/thermal/.gitignore | 2
-rw-r--r--  tools/lib/thermal/Build | 5
-rw-r--r--  tools/lib/thermal/Makefile | 165
-rw-r--r--  tools/lib/thermal/commands.c | 349
-rw-r--r--  tools/lib/thermal/events.c | 164
-rw-r--r--  tools/lib/thermal/include/thermal.h | 142
-rw-r--r--  tools/lib/thermal/libthermal.map | 25
-rw-r--r--  tools/lib/thermal/libthermal.pc.template | 12
-rw-r--r--  tools/lib/thermal/sampling.c | 75
-rw-r--r--  tools/lib/thermal/thermal.c | 135
-rw-r--r--  tools/lib/thermal/thermal_nl.c | 215
-rw-r--r--  tools/lib/thermal/thermal_nl.h | 46
-rw-r--r--  tools/objtool/Makefile | 4
-rw-r--r--  tools/objtool/arch/x86/decode.c | 5
-rw-r--r--  tools/objtool/builtin-check.c | 13
-rw-r--r--  tools/objtool/check.c | 339
-rw-r--r--  tools/objtool/include/objtool/arch.h | 1
-rw-r--r--  tools/objtool/include/objtool/builtin.h | 2
-rw-r--r--  tools/objtool/include/objtool/check.h | 24
-rw-r--r--  tools/objtool/include/objtool/elf.h | 1
-rw-r--r--  tools/objtool/include/objtool/objtool.h | 1
-rw-r--r--  tools/objtool/objtool.c | 1
-rw-r--r--  tools/perf/.gitignore | 1
-rw-r--r--  tools/perf/Documentation/perf-annotate.txt | 5
-rw-r--r--  tools/perf/Documentation/perf-arm-spe.txt | 218
-rw-r--r--  tools/perf/Documentation/perf-c2c.txt | 8
-rw-r--r--  tools/perf/Documentation/perf-intel-pt.txt | 165
-rw-r--r--  tools/perf/Documentation/perf-kvm.txt | 3
-rw-r--r--  tools/perf/Documentation/perf-lock.txt | 21
-rw-r--r--  tools/perf/Documentation/perf-record.txt | 12
-rw-r--r--  tools/perf/Documentation/perf-script.txt | 4
-rw-r--r--  tools/perf/Documentation/perf-stat.txt | 12
-rw-r--r--  tools/perf/Documentation/perf-top.txt | 2
-rw-r--r--  tools/perf/Documentation/perf.txt | 2
-rw-r--r--  tools/perf/Makefile.config | 67
-rw-r--r--  tools/perf/Makefile.perf | 6
-rw-r--r--  tools/perf/arch/arm/util/cs-etm.c | 1
-rw-r--r--  tools/perf/arch/arm64/util/arm-spe.c | 1
-rw-r--r--  tools/perf/arch/arm64/util/mem-events.c | 6
-rw-r--r--  tools/perf/arch/arm64/util/perf_regs.c | 38
-rw-r--r--  tools/perf/arch/arm64/util/unwind-libunwind.c | 73
-rw-r--r--  tools/perf/arch/riscv/Makefile | 1
-rw-r--r--  tools/perf/arch/s390/util/auxtrace.c | 1
-rw-r--r--  tools/perf/arch/x86/util/evlist.c | 7
-rw-r--r--  tools/perf/arch/x86/util/evsel.c | 32
-rw-r--r--  tools/perf/arch/x86/util/evsel.h | 7
-rw-r--r--  tools/perf/arch/x86/util/intel-bts.c | 1
-rw-r--r--  tools/perf/arch/x86/util/intel-pt.c | 32
-rw-r--r--  tools/perf/arch/x86/util/topdown.c | 46
-rw-r--r--  tools/perf/arch/x86/util/topdown.h | 7
-rw-r--r--  tools/perf/bench/Build | 1
-rw-r--r--  tools/perf/bench/bench.h | 2
-rw-r--r--  tools/perf/bench/breakpoint.c | 244
-rw-r--r--  tools/perf/builtin-annotate.c | 24
-rw-r--r--  tools/perf/builtin-bench.c | 8
-rw-r--r--  tools/perf/builtin-c2c.c | 90
-rw-r--r--  tools/perf/builtin-inject.c | 166
-rw-r--r--  tools/perf/builtin-kvm.c | 2
-rw-r--r--  tools/perf/builtin-lock.c | 94
-rw-r--r--  tools/perf/builtin-record.c | 77
-rw-r--r--  tools/perf/builtin-script.c | 37
-rw-r--r--  tools/perf/builtin-stat.c | 78
-rw-r--r--  tools/perf/builtin-trace.c | 2
-rw-r--r--  tools/perf/builtin-version.c | 1
-rwxr-xr-x  tools/perf/perf-with-kcore.sh | 247
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json | 11
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json | 29
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json | 11
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json | 44
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json | 59
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json | 182
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json | 95
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json | 107
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json | 59
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json | 188
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json | 65
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json | 80
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json | 29
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json | 80
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json | 179
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/exception.json | 47
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json | 68
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/cache.json | 236
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/dpu.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/exception.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/ifu.json | 122
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/instruction.json | 71
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/memory.json | 35
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a65/pipeline.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json | 155
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json | 47
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json | 134
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json | 41
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json | 29
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json | 11
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json | 107
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json | 65
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json | 44
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json | 38
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json | 11
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json | 164
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json | 74
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json | 44
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json | 44
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json | 143
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json | 47
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json | 77
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json | 155
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json | 47
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json | 80
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json | 155
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json | 47
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json | 80
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json | 155
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json | 47
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json | 134
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json | 41
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json | 29
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/branch.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/bus.json | 17
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/cache.json | 107
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/exception.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/instruction.json | 65
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/memory.json | 23
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/pipeline.json | 8
-rw-r--r--  tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/spe.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/arm64/common-and-microarch.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/arm64/mapfile.csv | 13
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z10/basic.json | 48
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z10/crypto.json | 64
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z10/extended.json | 36
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z13/basic.json | 48
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z13/crypto.json | 64
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z13/extended.json | 100
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z14/basic.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z14/crypto.json | 64
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z14/extended.json | 102
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/basic.json | 32
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/crypto.json | 114
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json | 112
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z15/extended.json | 108
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z16/basic.json | 58
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json | 142
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z16/extended.json | 492
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z16/transaction.json | 7
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z196/basic.json | 48
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z196/crypto.json | 64
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_z196/extended.json | 44
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_zec12/basic.json | 48
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json | 64
-rw-r--r--  tools/perf/pmu-events/arch/s390/cf_zec12/extended.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/s390/mapfile.csv | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json | 792
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/cache.json | 1164
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/memory.json | 702
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/other.json | 156
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json | 61
-rw-r--r--  tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json | 96
-rw-r--r--  tools/perf/pmu-events/arch/x86/elkhartlake/other.json | 13
-rw-r--r--  tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json | 13
-rw-r--r--  tools/perf/pmu-events/arch/x86/goldmont/other.json | 31
-rw-r--r--  tools/perf/pmu-events/arch/x86/goldmont/pipeline.json | 31
-rw-r--r--  tools/perf/pmu-events/arch/x86/goldmontplus/other.json | 37
-rw-r--r--  tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json | 37
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/cache.json | 41
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json | 36
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/memory.json | 10
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/other.json | 38
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelake/pipeline.json | 26
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/cache.json | 31
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json | 24
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/memory.json | 21
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/other.json | 70
-rw-r--r--  tools/perf/pmu-events/arch/x86/icelakex/pipeline.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/x86/ivytown/pipeline.json | 4
-rw-r--r--  tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json | 3
-rw-r--r--  tools/perf/pmu-events/arch/x86/mapfile.csv | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/nehalemep/other.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json | 1083
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json | 218
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json | 471
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json | 415
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/other.json | 362
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json | 1283
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json | 530
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json | 499
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json | 5150
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json | 12
-rw-r--r--  tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json | 225
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylake/cache.json | 174
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylake/memory.json | 90
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylake/pipeline.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylakex/cache.json | 74
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylakex/memory.json | 74
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylakex/pipeline.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json | 20
-rw-r--r--  tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json | 96
-rw-r--r--  tools/perf/pmu-events/arch/x86/tigerlake/other.json | 13
-rw-r--r--  tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json | 13
-rw-r--r--  tools/perf/pmu-events/arch/x86/tremontx/other.json | 13
-rw-r--r--  tools/perf/pmu-events/arch/x86/tremontx/pipeline.json | 13
-rw-r--r--  tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json | 22
-rw-r--r--  tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json | 94
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereep-dp/other.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json | 6
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereep-sp/other.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereex/cache.json | 14
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereex/memory.json | 6
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereex/other.json | 66
-rw-r--r--  tools/perf/pmu-events/arch/x86/westmereex/pipeline.json | 66
-rw-r--r--  tools/perf/pmu-events/jevents.c | 92
-rwxr-xr-x  tools/perf/scripts/python/arm-cs-trace-disasm.py | 274
-rw-r--r--  tools/perf/scripts/python/intel-pt-events.py | 8
-rw-r--r--  tools/perf/tests/bp_account.c | 16
-rw-r--r--  tools/perf/tests/builtin-test.c | 10
-rw-r--r--  tools/perf/tests/evsel-roundtrip-name.c | 2
-rw-r--r--  tools/perf/tests/expr.c | 2
-rw-r--r--  tools/perf/tests/mmap-basic.c | 18
-rw-r--r--  tools/perf/tests/openat-syscall-all-cpus.c | 23
-rw-r--r--  tools/perf/tests/openat-syscall.c | 20
-rw-r--r--  tools/perf/tests/parse-events.c | 492
-rw-r--r--  tools/perf/tests/perf-record.c | 18
-rw-r--r--  tools/perf/tests/perf-time-to-tsc.c | 27
-rw-r--r--  tools/perf/tests/pmu-events.c | 30
-rwxr-xr-x  tools/perf/tests/shell/record.sh | 80
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh | 60
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh | 168
-rwxr-xr-x  tools/perf/tests/shell/stat.sh | 80
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe_fork.sh | 92
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh | 71
-rw-r--r--  tools/perf/tests/topology.c | 2
-rw-r--r--  tools/perf/tests/vmlinux-kallsyms.c | 12
-rwxr-xr-x  tools/perf/trace/beauty/arch_errno_names.sh | 14
-rw-r--r--  tools/perf/trace/beauty/include/linux/socket.h | 7
-rw-r--r--  tools/perf/util/Build | 1
-rw-r--r--  tools/perf/util/arm-spe.c | 22
-rw-r--r--  tools/perf/util/auxtrace.c | 31
-rw-r--r--  tools/perf/util/auxtrace.h | 13
-rw-r--r--  tools/perf/util/bpf-event.c | 24
-rw-r--r--  tools/perf/util/bpf-loader.c | 249
-rw-r--r--  tools/perf/util/bpf-utils.c | 5
-rw-r--r--  tools/perf/util/bpf_counter.c | 67
-rw-r--r--  tools/perf/util/bpf_counter_cgroup.c | 42
-rw-r--r--  tools/perf/util/bpf_off_cpu.c | 343
-rw-r--r--  tools/perf/util/bpf_skel/off_cpu.bpf.c | 237
-rw-r--r--  tools/perf/util/build-id.c | 28
-rw-r--r--  tools/perf/util/data.c | 14
-rw-r--r--  tools/perf/util/data.h | 2
-rw-r--r--  tools/perf/util/dso.h | 2
-rw-r--r--  tools/perf/util/event.c | 7
-rw-r--r--  tools/perf/util/evlist.c | 123
-rw-r--r--  tools/perf/util/evlist.h | 7
-rw-r--r--  tools/perf/util/evsel.c | 93
-rw-r--r--  tools/perf/util/evsel.h | 29
-rw-r--r--  tools/perf/util/expr.l | 2
-rw-r--r--  tools/perf/util/genelf.h | 3
-rw-r--r--  tools/perf/util/header.c | 76
-rw-r--r--  tools/perf/util/header.h | 17
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | 99
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-decoder.h | 1
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c | 1
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h | 1
-rw-r--r--  tools/perf/util/intel-pt.c | 57
-rw-r--r--  tools/perf/util/libunwind/arm64.c | 2
-rw-r--r--  tools/perf/util/machine.c | 101
-rw-r--r--  tools/perf/util/machine.h | 5
-rw-r--r--  tools/perf/util/mem-events.c | 32
-rw-r--r--  tools/perf/util/mem-events.h | 1
-rw-r--r--  tools/perf/util/metricgroup.c | 133
-rw-r--r--  tools/perf/util/mmap.c | 4
-rw-r--r--  tools/perf/util/off_cpu.h | 38
-rw-r--r--  tools/perf/util/parse-events.c | 48
-rw-r--r--  tools/perf/util/parse-events.l | 2
-rw-r--r--  tools/perf/util/path.c | 14
-rw-r--r--  tools/perf/util/path.h | 1
-rw-r--r--  tools/perf/util/perf_regs.c | 2
-rw-r--r--  tools/perf/util/python-ext-sources | 1
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 70
-rw-r--r--  tools/perf/util/session.c | 7
-rw-r--r--  tools/perf/util/stat-display.c | 46
-rw-r--r--  tools/perf/util/stat-shadow.c | 29
-rw-r--r--  tools/perf/util/stat.c | 14
-rw-r--r--  tools/perf/util/stat.h | 22
-rw-r--r--  tools/perf/util/symbol-elf.c | 56
-rw-r--r--  tools/perf/util/symbol_conf.h | 3
-rw-r--r--  tools/perf/util/synthetic-events.c | 9
-rw-r--r--  tools/perf/util/topdown.c | 17
-rw-r--r--  tools/perf/util/topdown.h | 3
-rw-r--r--  tools/perf/util/unwind-libunwind-local.c | 105
-rw-r--r--  tools/perf/util/util.c | 5
-rw-r--r--  tools/power/acpi/common/cmfsize.c | 2
-rw-r--r--  tools/power/acpi/common/getopt.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/oslinuxtbl.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/osunixdir.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/osunixmap.c | 2
-rw-r--r--  tools/power/acpi/os_specific/service_layers/osunixxf.c | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/acpidump.h | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/apdump.c | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/apfiles.c | 2
-rw-r--r--  tools/power/acpi/tools/acpidump/apmain.c | 2
-rw-r--r--  tools/power/cpupower/debug/i386/dump_psb.c | 6
-rw-r--r--  tools/power/pm-graph/README | 6
-rwxr-xr-x  tools/power/pm-graph/bootgraph.py | 20
-rw-r--r--  tools/power/pm-graph/config/custom-timeline-functions.cfg | 2
-rwxr-xr-x  tools/power/pm-graph/sleepgraph.py | 518
-rw-r--r--  tools/power/x86/turbostat/Makefile | 2
-rw-r--r--  tools/power/x86/turbostat/turbostat.8 | 202
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 828
-rw-r--r--  tools/spi/spidev_test.c | 11
-rw-r--r--  tools/testing/crypto/chacha20-s390/Makefile | 12
-rw-r--r--  tools/testing/crypto/chacha20-s390/run-tests.sh | 34
-rw-r--r--  tools/testing/crypto/chacha20-s390/test-cipher.c | 372
-rw-r--r--  tools/testing/cxl/Kbuild | 3
-rw-r--r--  tools/testing/cxl/mock_mem.c | 10
-rw-r--r--  tools/testing/cxl/test/mem.c | 17
-rw-r--r--  tools/testing/cxl/test/mock.c | 29
-rw-r--r--  tools/testing/kunit/configs/all_tests_uml.config | 37
-rw-r--r--  tools/testing/kunit/configs/arch_uml.config | 5
-rw-r--r--  tools/testing/kunit/configs/coverage_uml.config | 11
-rwxr-xr-x  tools/testing/kunit/kunit.py | 142
-rw-r--r--  tools/testing/kunit/kunit_config.py | 73
-rw-r--r--  tools/testing/kunit/kunit_json.py | 66
-rw-r--r--  tools/testing/kunit/kunit_kernel.py | 183
-rw-r--r--  tools/testing/kunit/kunit_parser.py | 191
-rw-r--r--  tools/testing/kunit/kunit_printer.py | 48
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py | 254
-rw-r--r--  tools/testing/kunit/qemu_config.py | 17
-rw-r--r--  tools/testing/kunit/qemu_configs/alpha.py | 2
-rw-r--r--  tools/testing/kunit/qemu_configs/arm.py | 2
-rw-r--r--  tools/testing/kunit/qemu_configs/arm64.py | 2
-rw-r--r--  tools/testing/kunit/qemu_configs/i386.py | 4
-rw-r--r--  tools/testing/kunit/qemu_configs/powerpc.py | 2
-rw-r--r--  tools/testing/kunit/qemu_configs/riscv.py | 7
-rw-r--r--  tools/testing/kunit/qemu_configs/s390.py | 4
-rw-r--r--  tools/testing/kunit/qemu_configs/sparc.py | 2
-rw-r--r--  tools/testing/kunit/qemu_configs/x86_64.py | 2
-rwxr-xr-x  tools/testing/kunit/run_checks.py | 2
-rw-r--r--  tools/testing/kunit/test_data/test_is_test_passed-crash.log | 70
-rw-r--r--  tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log | 2
-rw-r--r--  tools/testing/memblock/TODO | 3
-rw-r--r--  tools/testing/memblock/tests/basic_api.c | 392
-rw-r--r--  tools/testing/nvdimm/pmem-dax.c | 4
-rw-r--r--  tools/testing/nvdimm/test/iomap.c | 18
-rw-r--r--  tools/testing/nvdimm/test/nfit.c | 3
-rw-r--r--  tools/testing/selftests/Makefile | 33
-rw-r--r--  tools/testing/selftests/alsa/Makefile | 3
-rw-r--r--  tools/testing/selftests/alsa/mixer-test.c | 41
-rw-r--r--  tools/testing/selftests/arm64/mte/Makefile | 1
-rw-r--r--  tools/testing/selftests/arm64/signal/Makefile | 1
-rw-r--r--  tools/testing/selftests/arm64/signal/test_signals.h | 4
-rw-r--r--  tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c | 2
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 3
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST | 6
-rw-r--r--  tools/testing/selftests/bpf/DENYLIST.s390x | 67
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 65
-rw-r--r--  tools/testing/selftests/bpf/bench.c | 100
-rw-r--r--  tools/testing/selftests/bpf/bench.h | 16
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c | 96
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_local_storage.c | 287
-rw-r--r--  tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c | 281
-rwxr-xr-x  tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh | 11
-rwxr-xr-x  tools/testing/selftests/bpf/benchs/run_bench_local_storage.sh | 24
-rwxr-xr-x  tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh | 11
-rw-r--r--  tools/testing/selftests/bpf/benchs/run_common.sh | 17
-rw-r--r--  tools/testing/selftests/bpf/bpf_legacy.h | 9
-rw-r--r--  tools/testing/selftests/bpf/bpf_rlimit.h | 28
-rw-r--r--  tools/testing/selftests/bpf/bpf_tcp_helpers.h | 13
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 10
-rw-r--r--  tools/testing/selftests/bpf/btf_helpers.c | 25
-rw-r--r--  tools/testing/selftests/bpf/config | 89
-rw-r--r--  tools/testing/selftests/bpf/config.s390x | 147
-rw-r--r--  tools/testing/selftests/bpf/config.x86_64 | 251
-rw-r--r--  tools/testing/selftests/bpf/flow_dissector_load.c | 6
-rw-r--r--  tools/testing/selftests/bpf/get_cgroup_id_user.c | 4
-rw-r--r--  tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c | 252
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.c | 42
-rw-r--r--  tools/testing/selftests/bpf/network_helpers.h | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/arg_parsing.c | 107
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/attach_probe.c | 144
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_cookie.c | 167
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 281
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_loop.c | 62
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_nf.c | 64
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 67
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf.c | 257
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf_write.c | 126
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_autosize.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_extern.c | 17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_reloc.c | 153
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/core_retro.c | 17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/dynptr.c | 137
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c | 14
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_stress.c | 36
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/for_each.c | 42
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/helper_restricted.c | 10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 164
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ksyms_btf.c | 17
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/libbpf_str.c | 207
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/linked_funcs.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_fixup.c | 149
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c | 15
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c | 313
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/map_kptr.c | 148
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c | 58
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/mptcp.c | 174
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/netcnt.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/probe_user.c | 35
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c | 56
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/reference_tracking.c | 23
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/resolve_btfids.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c | 21
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/send_signal.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c | 45
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skeleton.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/snprintf.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sock_fields.c | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c | 84
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c | 8
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c | 11
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tailcalls.c | 55
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 9
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_global_funcs.c | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_strncmp.c | 25
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_tunnel.c | 436
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/timer_mim.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/trampoline_count.c | 134
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c | 312
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c | 50
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/usdt.c | 419
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c | 183
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c | 40
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter.h | 14
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c | 21
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_ksym.c | 74
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_loop.c | 114
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_syscall_macro.c | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_tracing_net.h | 1
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/core_reloc_types.h | 208
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_fail.c | 588
-rw-r--r--  tools/testing/selftests/bpf/progs/dynptr_success.c | 164
-rw-r--r--  tools/testing/selftests/bpf/progs/exhandler_kern.c | 13
-rw-r--r--  tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c | 27
-rw-r--r--  tools/testing/selftests/bpf/progs/freplace_global_func.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_multi.c | 38
-rw-r--r--  tools/testing/selftests/bpf/progs/kprobe_multi_empty.c | 12
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_funcs1.c | 15
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_funcs2.c | 15
-rw-r--r--  tools/testing/selftests/bpf/progs/local_storage_bench.c | 104
-rw-r--r--  tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c | 67
-rw-r--r--  tools/testing/selftests/bpf/progs/loop5.c | 1
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm_cgroup.c | 180
-rw-r--r--  tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c | 14
-rw-r--r--  tools/testing/selftests/bpf/progs/map_kptr.c | 292
-rw-r--r--  tools/testing/selftests/bpf/progs/map_kptr_fail.c | 418
-rw-r--r--  tools/testing/selftests/bpf/progs/mptcp_sock.c | 88
-rw-r--r--  tools/testing/selftests/bpf/progs/perf_event_stackmap.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/profiler.inc.h | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/profiler1.c | 1
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf.h | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf600.c | 11
-rw-r--r--  tools/testing/selftests/bpf/progs/skb_load_bytes.c | 19
-rw-r--r--  tools/testing/selftests/bpf/progs/strncmp_test.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c | 42
-rw-r--r--  tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c | 35
-rw-r--r--  tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c | 21
-rw-r--r--  tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c | 60
-rw-r--r--  tools/testing/selftests/bpf/progs/test_attach_probe.c | 127
-rw-r--r--  tools/testing/selftests/bpf/progs/test_bpf_cookie.c | 56
-rw-r--r--  tools/testing/selftests/bpf/progs/test_bpf_nf.c | 85
-rw-r--r--  tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c | 134
-rw-r--r--  tools/testing/selftests/bpf/progs/test_btf_haskv.c | 51
-rw-r--r--  tools/testing/selftests/bpf/progs/test_btf_newkv.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_extern.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c | 70
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_existence.c | 11
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c | 19
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_size.c | 31
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c | 49
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_func17.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_helper_restricted.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/test_l4lb_noinline.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_log_fixup.c | 64
-rw-r--r--  tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c | 76
-rw-r--r--  tools/testing/selftests/bpf/progs/test_module_attach.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_pkt_access.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_probe_user.c | 50
-rw-r--r--  tools/testing/selftests/bpf/progs/test_ringbuf_multi.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sk_assign.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/test_skeleton.c | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_subprogs.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/test_task_pt_regs.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tc_dtime.c | 53
-rw-r--r--  tools/testing/selftests/bpf/progs/test_trampoline_count.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tunnel_kern.c | 433
-rw-r--r--  tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c | 83
-rw-r--r--  tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c | 73
-rw-r--r--  tools/testing/selftests/bpf/progs/test_urandom_usdt.c | 70
-rw-r--r--  tools/testing/selftests/bpf/progs/test_usdt.c | 96
-rw-r--r--  tools/testing/selftests/bpf/progs/test_usdt_multispec.c | 32
-rw-r--r--  tools/testing/selftests/bpf/progs/test_varlen.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_noinline.c | 42
-rw-r--r--  tools/testing/selftests/bpf/progs/trigger_bench.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c | 843
-rw-r--r--  tools/testing/selftests/bpf/sdt-config.h | 6
-rw-r--r--  tools/testing/selftests/bpf/sdt.h | 513
-rwxr-xr-x  tools/testing/selftests/bpf/test_bpftool_synctypes.py | 184
-rw-r--r--  tools/testing/selftests/bpf/test_btf.h | 3
-rw-r--r--  tools/testing/selftests/bpf/test_cgroup_storage.c | 7
-rw-r--r--  tools/testing/selftests/bpf/test_dev_cgroup.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_lpm_map.c | 43
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c | 70
-rwxr-xr-x  tools/testing/selftests/bpf/test_offload.py | 2
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 1013
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h | 89
-rw-r--r--  tools/testing/selftests/bpf/test_skb_cgroup_id_user.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_sock.c | 6
-rw-r--r--  tools/testing/selftests/bpf/test_sock_addr.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_sockmap.c | 5
-rw-r--r--  tools/testing/selftests/bpf/test_sysctl.c | 6
-rw-r--r--  tools/testing/selftests/bpf/test_tag.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_tcpnotify_user.c | 1
-rwxr-xr-x  tools/testing/selftests/bpf/test_tunnel.sh | 124
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 420
-rw-r--r--  tools/testing/selftests/bpf/test_verifier_log.c | 5
-rwxr-xr-x  tools/testing/selftests/bpf/test_xdp_veth.sh | 6
-rwxr-xr-x  tools/testing/selftests/bpf/test_xdping.sh | 4
-rwxr-xr-x  tools/testing/selftests/bpf/test_xsk.sh | 57
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.c | 91
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.h | 8
-rw-r--r--  tools/testing/selftests/bpf/trace_helpers.c | 9
-rw-r--r--  tools/testing/selftests/bpf/urandom_read.c | 63
-rw-r--r--  tools/testing/selftests/bpf/urandom_read_aux.c | 9
-rw-r--r--  tools/testing/selftests/bpf/urandom_read_lib1.c | 13
-rw-r--r--  tools/testing/selftests/bpf/urandom_read_lib2.c | 8
-rw-r--r--  tools/testing/selftests/bpf/verifier/bpf_loop_inline.c | 264
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 73
-rw-r--r--  tools/testing/selftests/bpf/verifier/jmp32.c | 21
-rw-r--r--  tools/testing/selftests/bpf/verifier/jump.c | 22
-rw-r--r--  tools/testing/selftests/bpf/verifier/map_kptr.c | 469
-rw-r--r--  tools/testing/selftests/bpf/verifier/ref_tracking.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/sock.c | 6
-rwxr-xr-x  tools/testing/selftests/bpf/vmtest.sh | 53
-rw-r--r--  tools/testing/selftests/bpf/xdp_redirect_multi.c | 1
-rw-r--r--  tools/testing/selftests/bpf/xdp_synproxy.c | 466
-rw-r--r--  tools/testing/selftests/bpf/xdping.c | 8
-rw-r--r--  tools/testing/selftests/bpf/xsk.c (renamed from tools/lib/bpf/xsk.c) | 92
-rw-r--r--  tools/testing/selftests/bpf/xsk.h (renamed from tools/lib/bpf/xsk.h) | 30
-rwxr-xr-x  tools/testing/selftests/bpf/xsk_prereqs.sh | 51
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.c (renamed from tools/testing/selftests/bpf/xdpxceiver.c) | 578
-rw-r--r--  tools/testing/selftests/bpf/xskxceiver.h (renamed from tools/testing/selftests/bpf/xdpxceiver.h) | 48
-rw-r--r--  tools/testing/selftests/cgroup/.gitignore | 1
-rw-r--r--  tools/testing/selftests/cgroup/Makefile | 2
-rw-r--r--  tools/testing/selftests/cgroup/cgroup_util.c | 64
-rw-r--r--  tools/testing/selftests/cgroup/cgroup_util.h | 5
-rw-r--r--  tools/testing/selftests/cgroup/config | 8
-rw-r--r--  tools/testing/selftests/cgroup/memcg_protection.m | 89
-rw-r--r--  tools/testing/selftests/cgroup/test_cpu.c | 726
-rw-r--r--  tools/testing/selftests/cgroup/test_memcontrol.c | 392
-rwxr-xr-x  tools/testing/selftests/cgroup/test_stress.sh | 2
-rw-r--r--  tools/testing/selftests/damon/_chk_dependency.sh | 10
-rw-r--r--  tools/testing/selftests/damon/sysfs.sh | 1
-rw-r--r--  tools/testing/selftests/dma/Makefile | 1
-rw-r--r--  tools/testing/selftests/dma/dma_map_benchmark.c | 2
-rw-r--r--  tools/testing/selftests/drivers/.gitignore | 1
-rw-r--r--  tools/testing/selftests/drivers/dma-buf/udmabuf.c | 3
-rwxr-xr-x  tools/testing/selftests/drivers/gpu/drm_mm.sh | 4
-rw-r--r--  tools/testing/selftests/drivers/net/dsa/Makefile | 17
l---------  tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh | 1
l---------  tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh | 1
l---------  tools/testing/selftests/drivers/net/dsa/bridge_mld.sh | 1
l---------  tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh | 1
l---------  tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh | 1
l---------  tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh | 1
-rw-r--r--  tools/testing/selftests/drivers/net/dsa/forwarding.config | 2
l---------  tools/testing/selftests/drivers/net/dsa/lib.sh | 1
l---------  tools/testing/selftests/drivers/net/dsa/local_termination.sh | 1
l---------  tools/testing/selftests/drivers/net/dsa/no_forwarding.sh | 1
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh | 334
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_burst.sh | 480
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh | 4
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/rif_counter_scale.sh | 107
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh | 5
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh | 5
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh | 31
l---------  tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_counter_scale.sh | 1
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh | 15
-rwxr-xr-x  tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh | 29
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_counter_scale.sh | 34
-rw-r--r--  tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh | 17
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/fib.sh | 45
-rwxr-xr-x  tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh | 4
-rwxr-xr-x  tools/testing/selftests/drivers/net/ocelot/basic_qos.sh | 253
-rwxr-xr-x  tools/testing/selftests/drivers/net/ocelot/psfp.sh | 327
-rwxr-xr-x  tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh | 202
-rw-r--r--  tools/testing/selftests/drivers/s390x/uvdevice/Makefile | 21
-rw-r--r--  tools/testing/selftests/drivers/s390x/uvdevice/config | 1
-rw-r--r--  tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c | 276
-rw-r--r--  tools/testing/selftests/filesystems/binderfs/binderfs_test.c | 4
-rw-r--r--  tools/testing/selftests/filesystems/binderfs/config | 1
-rw-r--r--  tools/testing/selftests/firmware/Makefile | 2
-rw-r--r--  tools/testing/selftests/firmware/config | 1
-rwxr-xr-x  tools/testing/selftests/firmware/fw_filesystem.sh | 170
-rwxr-xr-x  tools/testing/selftests/firmware/fw_lib.sh | 19
-rwxr-xr-x  tools/testing/selftests/firmware/fw_run_tests.sh | 4
-rwxr-xr-x  tools/testing/selftests/firmware/fw_upload.sh | 214
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc | 3
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc | 4
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc | 2
-rw-r--r--  tools/testing/selftests/futex/functional/Makefile | 1
-rw-r--r--  tools/testing/selftests/gpio/Makefile | 2
-rwxr-xr-x  tools/testing/selftests/ir/ir_loopback.sh | 2
-rw-r--r--  tools/testing/selftests/kcmp/kcmp_test.c | 6
-rwxr-xr-x  tools/testing/selftests/kexec/kexec_common_lib.sh | 36
-rw-r--r--  tools/testing/selftests/kselftest.h | 15
-rwxr-xr-x  tools/testing/selftests/kselftest_deps.sh | 2
-rw-r--r--  tools/testing/selftests/kselftest_module.h | 4
-rw-r--r--  tools/testing/selftests/kvm/.gitignore | 16
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 76
-rw-r--r--  tools/testing/selftests/kvm/aarch64/arch_timer.c | 88
-rw-r--r--  tools/testing/selftests/kvm/aarch64/debug-exceptions.c | 26
-rw-r--r--  tools/testing/selftests/kvm/aarch64/get-reg-list.c | 38
-rw-r--r--  tools/testing/selftests/kvm/aarch64/hypercalls.c | 313
-rw-r--r--  tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c | 121
-rw-r--r--  tools/testing/selftests/kvm/aarch64/psci_test.c | 199
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vcpu_width_config.c | 71
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vgic_init.c | 446
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vgic_irq.c | 44
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c | 92
-rw-r--r--  tools/testing/selftests/kvm/demand_paging_test.c | 49
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_perf_test.c | 89
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c | 95
-rw-r--r--  tools/testing/selftests/kvm/hardware_disable_test.c | 29
-rw-r--r--  tools/testing/selftests/kvm/include/aarch64/processor.h | 50
-rw-r--r--  tools/testing/selftests/kvm/include/aarch64/vgic.h | 6
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util_base.h | 823
-rw-r--r--  tools/testing/selftests/kvm/include/perf_test_util.h | 14
-rw-r--r--  tools/testing/selftests/kvm/include/riscv/processor.h | 28
-rw-r--r--  tools/testing/selftests/kvm/include/test_util.h | 7
-rw-r--r--  tools/testing/selftests/kvm/include/ucall_common.h | 65
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/apic.h | 1
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/evmcs.h | 2
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/mce.h | 25
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/processor.h | 499
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/svm.h | 2
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/svm_util.h | 27
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/vmx.h | 8
-rw-r--r--  tools/testing/selftests/kvm/kvm_binary_stats_test.c | 183
-rw-r--r--  tools/testing/selftests/kvm/kvm_create_max_vcpus.c | 10
-rw-r--r--  tools/testing/selftests/kvm/kvm_page_table_test.c | 66
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/processor.c | 106
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/ucall.c | 22
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/vgic.c | 54
-rw-r--r--  tools/testing/selftests/kvm/lib/elf.c | 1
-rw-r--r--  tools/testing/selftests/kvm/lib/guest_modes.c | 6
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 1207
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util_internal.h | 128
-rw-r--r--  tools/testing/selftests/kvm/lib/perf_test_util.c | 125
-rw-r--r--  tools/testing/selftests/kvm/lib/riscv/processor.c | 120
-rw-r--r--  tools/testing/selftests/kvm/lib/riscv/ucall.c | 39
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c | 11
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/processor.c | 44
-rw-r--r--  tools/testing/selftests/kvm/lib/s390x/ucall.c | 10
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c | 111
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c | 842
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/svm.c | 17
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/ucall.c | 12
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/vmx.c | 167
-rw-r--r--  tools/testing/selftests/kvm/max_guest_memory_test.c | 55
-rw-r--r--  tools/testing/selftests/kvm/memslot_modification_stress_test.c | 13
-rw-r--r--  tools/testing/selftests/kvm/memslot_perf_test.c | 32
-rw-r--r--  tools/testing/selftests/kvm/rseq_test.c | 30
-rw-r--r--  tools/testing/selftests/kvm/s390x/memop.c | 226
-rw-r--r--  tools/testing/selftests/kvm/s390x/resets.c | 178
-rw-r--r--  tools/testing/selftests/kvm/s390x/sync_regs_test.c | 121
-rw-r--r--  tools/testing/selftests/kvm/s390x/tprot.c | 68
-rw-r--r--  tools/testing/selftests/kvm/set_memory_region_test.c | 46
-rw-r--r--  tools/testing/selftests/kvm/steal_time.c | 136
-rw-r--r--  tools/testing/selftests/kvm/system_counter_offset_test.c | 38
-rw-r--r--  tools/testing/selftests/kvm/x86_64/amx_test.c | 91
-rw-r--r--  tools/testing/selftests/kvm/x86_64/cpuid_test.c | 105
-rw-r--r--  tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c | 43
-rw-r--r--  tools/testing/selftests/kvm/x86_64/debug_regs.c | 77
-rw-r--r--  tools/testing/selftests/kvm/x86_64/emulator_error_test.c | 85
-rw-r--r--  tools/testing/selftests/kvm/x86_64/evmcs_test.c | 65
-rw-r--r--  tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c | 163
-rw-r--r--  tools/testing/selftests/kvm/x86_64/get_msr_index_features.c | 117
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_clock.c | 38
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 48
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_features.c | 406
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c | 28
-rw-r--r--  tools/testing/selftests/kvm/x86_64/kvm_clock_test.c | 32
-rw-r--r--  tools/testing/selftests/kvm/x86_64/kvm_pv_test.c | 117
-rw-r--r--  tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c | 44
-rw-r--r--  tools/testing/selftests/kvm/x86_64/mmio_warning_test.c | 16
-rw-r--r--  tools/testing/selftests/kvm/x86_64/mmu_role_test.c | 147
-rw-r--r--  tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c | 131
-rw-r--r--  tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c | 269
-rwxr-xr-x  tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh | 59
-rw-r--r--  tools/testing/selftests/kvm/x86_64/platform_info_test.c | 51
-rw-r--r--  tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c | 117
-rw-r--r--  tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c | 95
-rw-r--r--  tools/testing/selftests/kvm/x86_64/set_sregs_test.c | 75
-rw-r--r--  tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c | 131
-rw-r--r--  tools/testing/selftests/kvm/x86_64/smm_test.c | 46
-rw-r--r--  tools/testing/selftests/kvm/x86_64/state_test.c | 39
-rw-r--r--  tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c | 25
-rw-r--r--  tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c | 211
-rw-r--r--  tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c | 20
-rw-r--r--  tools/testing/selftests/kvm/x86_64/sync_regs_test.c | 62
-rw-r--r--  tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c | 90
-rw-r--r--  tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c | 39
-rw-r--r--  tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c | 118
-rw-r--r--  tools/testing/selftests/kvm/x86_64/ucna_injection_test.c | 316
-rw-r--r--  tools/testing/selftests/kvm/x86_64/userspace_io_test.c | 22
-rw-r--r--  tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c | 188
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c | 32
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c | 21
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c | 18
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c | 68
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c | 22
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c | 84
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c | 33
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c | 102
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c | 114
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c | 38
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c | 105
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c | 17
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c | 48
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xapic_state_test.c | 82
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c | 409
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c | 27
-rw-r--r--  tools/testing/selftests/kvm/x86_64/xss_msr_test.c | 56
-rw-r--r--  tools/testing/selftests/landlock/Makefile | 10
-rw-r--r--  tools/testing/selftests/lib.mk | 63
-rw-r--r--  tools/testing/selftests/lkdtm/config | 4
-rw-r--r--  tools/testing/selftests/lkdtm/tests.txt | 9
-rw-r--r--  tools/testing/selftests/mqueue/mq_perf_tests.c | 4
-rw-r--r--  tools/testing/selftests/net/.gitignore | 3
-rw-r--r--  tools/testing/selftests/net/Makefile | 12
-rw-r--r--  tools/testing/selftests/net/af_unix/Makefile | 3
-rw-r--r--  tools/testing/selftests/net/af_unix/unix_connect.c | 148
-rwxr-xr-x  tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh | 308
-rw-r--r--  tools/testing/selftests/net/bpf/Makefile | 4
-rw-r--r--  tools/testing/selftests/net/cmsg_sender.c | 2
-rwxr-xr-x  tools/testing/selftests/net/fcnal-test.sh | 61
-rwxr-xr-x  tools/testing/selftests/net/fib_nexthop_nongw.sh | 119
-rwxr-xr-x  tools/testing/selftests/net/fib_nexthops.sh | 53
-rwxr-xr-x  tools/testing/selftests/net/fib_rule_tests.sh | 35
-rw-r--r--  tools/testing/selftests/net/forwarding/Makefile | 5
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_mdb.sh | 103
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_mdb_port_down.sh | 118
-rwxr-xr-x  tools/testing/selftests/net/forwarding/ethtool_extended_state.sh | 43
-rwxr-xr-x  tools/testing/selftests/net/forwarding/hw_stats_l3.sh | 16
-rwxr-xr-x  tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh | 109
-rwxr-xr-x [-rw-r--r--]  tools/testing/selftests/net/forwarding/lib.sh | 150
-rwxr-xr-x  tools/testing/selftests/net/forwarding/local_termination.sh | 299
-rwxr-xr-x  tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh | 7
-rwxr-xr-x  tools/testing/selftests/net/forwarding/no_forwarding.sh | 261
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router.sh | 18
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_vid_1.sh | 27
-rwxr-xr-x  tools/testing/selftests/net/forwarding/tc_actions.sh | 2
-rw-r--r--  tools/testing/selftests/net/forwarding/tsn_lib.sh | 235
-rwxr-xr-x  tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh | 2
-rw-r--r--  tools/testing/selftests/net/io_uring_zerocopy_tx.c | 605
-rwxr-xr-x  tools/testing/selftests/net/io_uring_zerocopy_tx.sh | 131
-rwxr-xr-x  tools/testing/selftests/net/ioam6.sh | 12
-rw-r--r--  tools/testing/selftests/net/ipv6_flowlabel.c | 75
-rwxr-xr-x  tools/testing/selftests/net/ipv6_flowlabel.sh | 16
-rw-r--r--  tools/testing/selftests/net/mptcp/Makefile | 3
-rw-r--r--  tools/testing/selftests/net/mptcp/config | 8
-rwxr-xr-x  tools/testing/selftests/net/mptcp/diag.sh | 86
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect.c | 2
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_inq.c | 2
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 359
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_sockopt.c | 2
-rw-r--r--  tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 720
-rwxr-xr-x  tools/testing/selftests/net/mptcp/simult_flows.sh | 14
-rwxr-xr-x  tools/testing/selftests/net/mptcp/userspace_pm.sh | 817
-rwxr-xr-x  tools/testing/selftests/net/ndisc_unsolicited_na_test.sh | 254
-rw-r--r--  tools/testing/selftests/net/psock_snd.c | 2
-rwxr-xr-x  tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh | 879
-rwxr-xr-x  tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh | 821
-rw-r--r--  tools/testing/selftests/net/stress_reuseport_listen.c | 105
-rwxr-xr-x  tools/testing/selftests/net/stress_reuseport_listen.sh | 25
-rw-r--r--  tools/testing/selftests/net/tls.c | 124
-rw-r--r--  tools/testing/selftests/net/tun.c | 162
-rwxr-xr-x  tools/testing/selftests/net/udpgro.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/udpgro_bench.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/udpgro_frglist.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/udpgro_fwd.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/udpgso_bench.sh | 2
-rwxr-xr-x  tools/testing/selftests/net/veth.sh | 6
-rwxr-xr-x  tools/testing/selftests/net/vrf_strict_mode_test.sh | 48
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_concat_range.sh | 2
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_fib.sh | 50
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_nat.sh | 43
-rw-r--r--  tools/testing/selftests/powerpc/include/utils.h | 5
-rw-r--r--  tools/testing/selftests/powerpc/math/Makefile | 4
-rw-r--r--  tools/testing/selftests/powerpc/math/mma.S | 33
-rw-r--r--  tools/testing/selftests/powerpc/math/mma.c | 48
-rw-r--r--  tools/testing/selftests/powerpc/mm/.gitignore | 1
-rw-r--r--  tools/testing/selftests/powerpc/mm/Makefile | 4
-rw-r--r--  tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c | 156
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S | 43
-rw-r--r--  tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/security/spectre_v2.c | 32
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/kvm-check-branches.sh | 11
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/kvm-remote.sh | 1
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/kvm.sh | 6
-rw-r--r--  tools/testing/selftests/resctrl/Makefile | 19
-rw-r--r--  tools/testing/selftests/resctrl/README | 39
-rw-r--r--  tools/testing/selftests/resctrl/cat_test.c | 2
-rw-r--r--  tools/testing/selftests/resctrl/fill_buf.c | 4
-rw-r--r--  tools/testing/selftests/resctrl/resctrl.h | 5
-rw-r--r--  tools/testing/selftests/resctrl/resctrl_tests.c | 49
-rw-r--r--  tools/testing/selftests/resctrl/resctrl_val.c | 1
-rw-r--r--  tools/testing/selftests/resctrl/resctrlfs.c | 2
-rw-r--r--  tools/testing/selftests/resctrl/settings | 3
-rw-r--r--  tools/testing/selftests/rseq/rseq-riscv.h | 50
-rw-r--r--  tools/testing/selftests/rseq/rseq.c | 3
-rw-r--r--  tools/testing/selftests/safesetid/Makefile | 2
-rw-r--r--  tools/testing/selftests/safesetid/safesetid-test.c | 295
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c | 2
-rw-r--r--  tools/testing/selftests/sync/config | 1
-rwxr-xr-x  tools/testing/selftests/sysctl/sysctl.sh | 23
-rw-r--r--  tools/testing/selftests/tc-testing/.gitignore | 1
-rw-r--r--  tools/testing/selftests/tc-testing/Makefile | 1
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/gact.json | 77
-rw-r--r--  tools/testing/selftests/timens/Makefile | 2
-rw-r--r--  tools/testing/selftests/timens/vfork_exec.c | 90
-rw-r--r--  tools/testing/selftests/timers/adjtick.c | 2
-rw-r--r--  tools/testing/selftests/timers/alarmtimer-suspend.c | 2
-rw-r--r--  tools/testing/selftests/timers/change_skew.c | 2
-rw-r--r--  tools/testing/selftests/timers/clocksource-switch.c | 71
-rw-r--r--  tools/testing/selftests/timers/inconsistency-check.c | 32
-rw-r--r--  tools/testing/selftests/timers/nanosleep.c | 18
-rw-r--r--  tools/testing/selftests/timers/raw_skew.c | 2
-rw-r--r--  tools/testing/selftests/timers/skew_consistency.c | 2
-rw-r--r--  tools/testing/selftests/timers/valid-adjtimex.c | 2
-rw-r--r--  tools/testing/selftests/tpm2/settings | 1
-rw-r--r--  tools/testing/selftests/vm/.gitignore | 3
-rw-r--r--  tools/testing/selftests/vm/Makefile | 15
-rw-r--r--  tools/testing/selftests/vm/config | 2
-rw-r--r--  tools/testing/selftests/vm/gup_test.c | 26
-rw-r--r--  tools/testing/selftests/vm/hugepage-mremap.c | 6
-rw-r--r--  tools/testing/selftests/vm/ksm_tests.c | 11
-rw-r--r--  tools/testing/selftests/vm/madv_populate.c | 34
-rw-r--r--  tools/testing/selftests/vm/migration.c | 193
-rw-r--r--  tools/testing/selftests/vm/mrelease_test.c | 200
-rw-r--r--  tools/testing/selftests/vm/pkey-x86.h | 21
-rw-r--r--  tools/testing/selftests/vm/protection_keys.c | 2
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh525
-rw-r--r--tools/testing/selftests/vm/settings1
-rw-r--r--tools/testing/selftests/vm/soft-dirty.c145
-rw-r--r--tools/testing/selftests/vm/split_huge_page_test.c79
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c15
-rw-r--r--tools/testing/selftests/vm/vm_util.c108
-rw-r--r--tools/testing/selftests/vm/vm_util.h9
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile61
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/arm.config1
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/armeb.config1
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/i686.config8
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/m68k.config10
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mips.config1
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mipsel.config1
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/powerpc.config1
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/um.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/x86_64.config7
-rw-r--r--tools/testing/selftests/wireguard/qemu/debug.config5
-rw-r--r--tools/testing/selftests/wireguard/qemu/init.c14
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config5
-rw-r--r--tools/testing/selftests/x86/amx.c24
-rw-r--r--tools/testing/selftests/x86/corrupt_xstate_header.c16
-rw-r--r--tools/thermal/lib/Build3
-rw-r--r--tools/thermal/lib/Makefile158
-rw-r--r--tools/thermal/lib/libthermal_tools.pc.template12
-rw-r--r--tools/thermal/lib/log.c77
-rw-r--r--tools/thermal/lib/log.h31
-rw-r--r--tools/thermal/lib/mainloop.c120
-rw-r--r--tools/thermal/lib/mainloop.h15
-rw-r--r--tools/thermal/lib/thermal-tools.h10
-rw-r--r--tools/thermal/lib/uptimeofday.c40
-rw-r--r--tools/thermal/lib/uptimeofday.h12
-rw-r--r--tools/thermal/thermal-engine/Build1
-rw-r--r--tools/thermal/thermal-engine/Makefile28
-rw-r--r--tools/thermal/thermal-engine/thermal-engine.c341
-rw-r--r--tools/thermal/thermometer/Build1
-rw-r--r--tools/thermal/thermometer/Makefile26
-rw-r--r--tools/thermal/thermometer/thermometer.892
-rw-r--r--tools/thermal/thermometer/thermometer.c572
-rw-r--r--tools/thermal/thermometer/thermometer.conf5
-rw-r--r--tools/thermal/tmon/pid.c2
-rw-r--r--tools/thermal/tmon/tmon.h3
-rw-r--r--tools/tracing/rtla/Makefile40
-rw-r--r--tools/tracing/rtla/README.txt13
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c5
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c9
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c11
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c11
-rw-r--r--tools/tracing/rtla/src/utils.c108
-rw-r--r--tools/tracing/rtla/src/utils.h3
-rw-r--r--tools/usb/testusb.c20
-rw-r--r--tools/vm/page-types.c8
-rw-r--r--tools/vm/page_owner_sort.c386
-rw-r--r--tools/vm/slabinfo.c26
1077 files changed, 71892 insertions, 19515 deletions
diff --git a/tools/Makefile b/tools/Makefile
index 724134f0e56c..e497875fc7e3 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -32,6 +32,9 @@ help:
@echo ' bootconfig - boot config tool'
@echo ' spi - spi tools'
@echo ' tmon - thermal monitoring and tuning tool'
+ @echo ' thermometer - temperature capture tool'
+ @echo ' thermal-engine - thermal monitoring tool'
+ @echo ' thermal - thermal library'
@echo ' tracing - misc tracing tools'
@echo ' turbostat - Intel CPU idle stats and freq reporting tool'
@echo ' usb - USB testing tools'
@@ -75,6 +78,9 @@ bpf/%: FORCE
libapi: FORCE
$(call descend,lib/api)
+nolibc: FORCE
+ $(call descend,include/nolibc)
+
nolibc_%: FORCE
$(call descend,include/nolibc,$(patsubst nolibc_%,%,$@))
@@ -89,12 +95,21 @@ perf: FORCE
selftests: FORCE
$(call descend,testing/$@)
+thermal: FORCE
+ $(call descend,lib/$@)
+
turbostat x86_energy_perf_policy intel-speed-select: FORCE
$(call descend,power/x86/$@)
tmon: FORCE
$(call descend,thermal/$@)
+thermometer: FORCE
+ $(call descend,thermal/$@)
+
+thermal-engine: FORCE thermal
+ $(call descend,thermal/$@)
+
freefall: FORCE
$(call descend,laptop/$@)
@@ -105,7 +120,7 @@ all: acpi cgroup counter cpupower gpio hv firewire \
perf selftests bootconfig spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
- pci debugging tracing
+ pci debugging tracing thermal thermometer thermal-engine
acpi_install:
$(call descend,power/$(@:_install=),install)
@@ -119,12 +134,21 @@ cgroup_install counter_install firewire_install gpio_install hv_install iio_inst
selftests_install:
$(call descend,testing/$(@:_install=),install)
+thermal_install:
+ $(call descend,lib/$(@:_install=),install)
+
turbostat_install x86_energy_perf_policy_install intel-speed-select_install:
$(call descend,power/x86/$(@:_install=),install)
tmon_install:
$(call descend,thermal/$(@:_install=),install)
+thermometer_install:
+ $(call descend,thermal/$(@:_install=),install)
+
+thermal-engine_install:
+ $(call descend,thermal/$(@:_install=),install)
+
freefall_install:
$(call descend,laptop/$(@:_install=),install)
@@ -137,7 +161,7 @@ install: acpi_install cgroup_install counter_install cpupower_install gpio_insta
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
wmi_install pci_install debugging_install intel-speed-select_install \
- tracing_install
+ tracing_install thermometer_install thermal-engine_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -164,9 +188,18 @@ perf_clean:
selftests_clean:
$(call descend,testing/$(@:_clean=),clean)
+thermal_clean:
+ $(call descend,lib/thermal,clean)
+
turbostat_clean x86_energy_perf_policy_clean intel-speed-select_clean:
$(call descend,power/x86/$(@:_clean=),clean)
+thermometer_clean:
+ $(call descend,thermal/thermometer,clean)
+
+thermal-engine_clean:
+ $(call descend,thermal/thermal-engine,clean)
+
tmon_clean:
$(call descend,thermal/tmon,clean)
@@ -181,6 +214,6 @@ clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_cl
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean \
gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
- intel-speed-select_clean tracing_clean
+ intel-speed-select_clean tracing_clean thermal_clean thermometer_clean thermal-engine_clean
.PHONY: FORCE
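
Note: the three new thermal targets follow the Makefile's existing pattern — every top-level target simply descends into its subdirectory, and thermal-engine lists thermal as a prerequisite so lib/thermal is built before the engine links against it. Assuming a kernel tree, the build/install/clean triple works like any other tools target:

    make -C tools thermal-engine
    make -C tools thermal-engine_install
    make -C tools thermal-engine_clean
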
diff --git a/tools/accounting/.gitignore b/tools/accounting/.gitignore
index c45fb4ed4309..522a690aaf3d 100644
--- a/tools/accounting/.gitignore
+++ b/tools/accounting/.gitignore
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
getdelays
+procacct
diff --git a/tools/accounting/Makefile b/tools/accounting/Makefile
index 03687f19cbb1..11def1ad046c 100644
--- a/tools/accounting/Makefile
+++ b/tools/accounting/Makefile
@@ -2,7 +2,7 @@
CC := $(CROSS_COMPILE)gcc
CFLAGS := -I../../usr/include
-PROGS := getdelays
+PROGS := getdelays procacct
all: $(PROGS)
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 11e86739456d..e83e6e47a21e 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -207,6 +207,8 @@ static void print_delayacct(struct taskstats *t)
"THRASHING%12s%15s%15s\n"
" %15llu%15llu%15llums\n"
"COMPACT %12s%15s%15s\n"
+ " %15llu%15llu%15llums\n"
+ "WPCOPY %12s%15s%15s\n"
" %15llu%15llu%15llums\n",
"count", "real total", "virtual total",
"delay total", "delay average",
@@ -234,7 +236,11 @@ static void print_delayacct(struct taskstats *t)
"count", "delay total", "delay average",
(unsigned long long)t->compact_count,
(unsigned long long)t->compact_delay_total,
- average_ms(t->compact_delay_total, t->compact_count));
+ average_ms(t->compact_delay_total, t->compact_count),
+ "count", "delay total", "delay average",
+ (unsigned long long)t->wpcopy_count,
+ (unsigned long long)t->wpcopy_delay_total,
+ average_ms(t->wpcopy_delay_total, t->wpcopy_count));
}
static void task_context_switch_counts(struct taskstats *t)
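
Note: the new WPCOPY row reports delays spent in write-protect copy (copy-on-write page fault) handling; like the other rows it prints the count, the total delay in nanoseconds, and a per-event average in milliseconds via the file's average_ms() helper. A minimal sketch of that computation, assuming a struct taskstats recent enough (version >= 13) to carry the wpcopy fields:

    #include <linux/taskstats.h>

    /* Same arithmetic as average_ms(): nanoseconds -> milliseconds,
     * guarding the division against a zero event count. */
    static unsigned long long wpcopy_avg_ms(const struct taskstats *t)
    {
            unsigned long long cnt = t->wpcopy_count;

            return t->wpcopy_delay_total / 1000000ULL / (cnt ? cnt : 1);
    }
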
diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c
new file mode 100644
index 000000000000..8353d3237e50
--- /dev/null
+++ b/tools/accounting/procacct.c
@@ -0,0 +1,417 @@
+// SPDX-License-Identifier: GPL-2.0
+/* procacct.c
+ *
+ * Demonstrator of fetching resource data on task exit, as a way
+ * to accumulate accurate program resource usage statistics, without
+ * prior identification of the programs. For that, the fields for
+ * device and inode of the program executable binary file are also
+ * extracted in addition to the command string.
+ *
+ * The TGID together with the PID and the AGROUP flag allow
+ * identification of threads in a process and single-threaded processes.
+ * The ac_tgetime field gives proper whole-process walltime.
+ *
+ * Written (changed) by Thomas Orgis, University of Hamburg in 2022
+ *
+ * This is a cheap derivation (inheriting the style) of getdelays.c:
+ *
+ * Utility to get per-pid and per-tgid delay accounting statistics
+ * Also illustrates usage of the taskstats interface
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2005
+ * Copyright (C) Balbir Singh, IBM Corp. 2006
+ * Copyright (c) Jay Lan, SGI. 2006
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <poll.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+
+#include <linux/genetlink.h>
+#include <linux/acct.h>
+#include <linux/taskstats.h>
+#include <linux/kdev_t.h>
+
+/*
+ * Generic macros for dealing with netlink sockets. Might be duplicated
+ * elsewhere. It is recommended that commercial grade applications use
+ * libnl or libnetlink and use the interfaces provided by the library
+ */
+#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
+#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
+#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
+#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
+
+#define err(code, fmt, arg...) \
+ do { \
+ fprintf(stderr, fmt, ##arg); \
+ exit(code); \
+ } while (0)
+
+int rcvbufsz;
+char name[100];
+int dbg;
+int print_delays;
+int print_io_accounting;
+int print_task_context_switch_counts;
+
+#define PRINTF(fmt, arg...) { \
+ if (dbg) { \
+ printf(fmt, ##arg); \
+ } \
+ }
+
+/* Maximum size of response requested or message sent */
+#define MAX_MSG_SIZE 1024
+/* Maximum number of cpus expected to be specified in a cpumask */
+#define MAX_CPUS 32
+
+struct msgtemplate {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[MAX_MSG_SIZE];
+};
+
+char cpumask[100+6*MAX_CPUS];
+
+static void usage(void)
+{
+ fprintf(stderr, "procacct [-v] [-w logfile] [-r bufsize] [-m cpumask]\n");
+ fprintf(stderr, " -v: debug on\n");
+}
+
+/*
+ * Create a raw netlink socket and bind
+ */
+static int create_nl_socket(int protocol)
+{
+ int fd;
+ struct sockaddr_nl local;
+
+ fd = socket(AF_NETLINK, SOCK_RAW, protocol);
+ if (fd < 0)
+ return -1;
+
+ if (rcvbufsz)
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
+ &rcvbufsz, sizeof(rcvbufsz)) < 0) {
+ fprintf(stderr, "Unable to set socket rcv buf size to %d\n",
+ rcvbufsz);
+ goto error;
+ }
+
+ memset(&local, 0, sizeof(local));
+ local.nl_family = AF_NETLINK;
+
+ if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
+ goto error;
+
+ return fd;
+error:
+ close(fd);
+ return -1;
+}
+
+
+static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
+ __u8 genl_cmd, __u16 nla_type,
+ void *nla_data, int nla_len)
+{
+ struct nlattr *na;
+ struct sockaddr_nl nladdr;
+ int r, buflen;
+ char *buf;
+
+ struct msgtemplate msg;
+
+ msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ msg.n.nlmsg_type = nlmsg_type;
+ msg.n.nlmsg_flags = NLM_F_REQUEST;
+ msg.n.nlmsg_seq = 0;
+ msg.n.nlmsg_pid = nlmsg_pid;
+ msg.g.cmd = genl_cmd;
+ msg.g.version = 0x1;
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+ na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
+ buf = (char *) &msg;
+ buflen = msg.n.nlmsg_len;
+ memset(&nladdr, 0, sizeof(nladdr));
+ nladdr.nl_family = AF_NETLINK;
+ while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
+ sizeof(nladdr))) < buflen) {
+ if (r > 0) {
+ buf += r;
+ buflen -= r;
+ } else if (errno != EAGAIN)
+ return -1;
+ }
+ return 0;
+}
+
+
+/*
+ * Probe the controller in genetlink to find the family id
+ * for the TASKSTATS family
+ */
+static int get_family_id(int sd)
+{
+ struct {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[256];
+ } ans;
+
+ int id = 0, rc;
+ struct nlattr *na;
+ int rep_len;
+
+ strcpy(name, TASKSTATS_GENL_NAME);
+ rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
+ CTRL_ATTR_FAMILY_NAME, (void *)name,
+ strlen(TASKSTATS_GENL_NAME)+1);
+ if (rc < 0)
+ return 0; /* sendto() failure? */
+
+ rep_len = recv(sd, &ans, sizeof(ans), 0);
+ if (ans.n.nlmsg_type == NLMSG_ERROR ||
+ (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
+ return 0;
+
+ na = (struct nlattr *) GENLMSG_DATA(&ans);
+ na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
+ if (na->nla_type == CTRL_ATTR_FAMILY_ID)
+ id = *(__u16 *) NLA_DATA(na);
+
+ return id;
+}
+
+#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
+
+static void print_procacct(struct taskstats *t)
+{
+	/* First letter: T is a mere thread, P the last thread of a process, ? unknown. */
+ printf(
+ "%c pid=%lu tgid=%lu uid=%lu wall=%llu gwall=%llu cpu=%llu vmpeak=%llu rsspeak=%llu dev=%lu:%lu inode=%llu comm=%s\n"
+ , t->version >= 12 ? (t->ac_flag & AGROUP ? 'P' : 'T') : '?'
+ , (unsigned long)t->ac_pid
+ , (unsigned long)(t->version >= 12 ? t->ac_tgid : 0)
+ , (unsigned long)t->ac_uid
+ , (unsigned long long)t->ac_etime
+ , (unsigned long long)(t->version >= 12 ? t->ac_tgetime : 0)
+ , (unsigned long long)(t->ac_utime+t->ac_stime)
+ , (unsigned long long)t->hiwater_vm
+ , (unsigned long long)t->hiwater_rss
+ , (unsigned long)(t->version >= 12 ? MAJOR(t->ac_exe_dev) : 0)
+ , (unsigned long)(t->version >= 12 ? MINOR(t->ac_exe_dev) : 0)
+ , (unsigned long long)(t->version >= 12 ? t->ac_exe_inode : 0)
+ , t->ac_comm
+ );
+}
+
+void handle_aggr(int mother, struct nlattr *na, int fd)
+{
+ int aggr_len = NLA_PAYLOAD(na->nla_len);
+ int len2 = 0;
+ pid_t rtid = 0;
+
+ na = (struct nlattr *) NLA_DATA(na);
+ while (len2 < aggr_len) {
+ switch (na->nla_type) {
+ case TASKSTATS_TYPE_PID:
+ rtid = *(int *) NLA_DATA(na);
+ PRINTF("PID\t%d\n", rtid);
+ break;
+ case TASKSTATS_TYPE_TGID:
+ rtid = *(int *) NLA_DATA(na);
+ PRINTF("TGID\t%d\n", rtid);
+ break;
+ case TASKSTATS_TYPE_STATS:
+ if (mother == TASKSTATS_TYPE_AGGR_PID)
+ print_procacct((struct taskstats *) NLA_DATA(na));
+ if (fd) {
+ if (write(fd, NLA_DATA(na), na->nla_len) < 0)
+ err(1, "write error\n");
+ }
+ break;
+ case TASKSTATS_TYPE_NULL:
+ break;
+ default:
+ fprintf(stderr, "Unknown nested nla_type %d\n",
+ na->nla_type);
+ break;
+ }
+ len2 += NLA_ALIGN(na->nla_len);
+ na = (struct nlattr *)((char *)na +
+ NLA_ALIGN(na->nla_len));
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ int c, rc, rep_len, aggr_len, len2;
+ int cmd_type = TASKSTATS_CMD_ATTR_UNSPEC;
+ __u16 id;
+ __u32 mypid;
+
+ struct nlattr *na;
+ int nl_sd = -1;
+ int len = 0;
+ pid_t tid = 0;
+
+ int fd = 0;
+ int write_file = 0;
+ int maskset = 0;
+ char *logfile = NULL;
+ int containerset = 0;
+ char *containerpath = NULL;
+ int cfd = 0;
+ int forking = 0;
+ sigset_t sigset;
+
+ struct msgtemplate msg;
+
+ while (!forking) {
+		c = getopt(argc, argv, "w:m:vr:");
+ if (c < 0)
+ break;
+
+ switch (c) {
+ case 'w':
+ logfile = strdup(optarg);
+ printf("write to file %s\n", logfile);
+ write_file = 1;
+ break;
+ case 'r':
+ rcvbufsz = atoi(optarg);
+ printf("receive buf size %d\n", rcvbufsz);
+ if (rcvbufsz < 0)
+ err(1, "Invalid rcv buf size\n");
+ break;
+ case 'm':
+ strncpy(cpumask, optarg, sizeof(cpumask));
+ cpumask[sizeof(cpumask) - 1] = '\0';
+ maskset = 1;
+ break;
+ case 'v':
+ printf("debug on\n");
+ dbg = 1;
+ break;
+ default:
+ usage();
+ exit(-1);
+ }
+ }
+ if (!maskset) {
+ maskset = 1;
+ strncpy(cpumask, "1", sizeof(cpumask));
+ cpumask[sizeof(cpumask) - 1] = '\0';
+ }
+ printf("cpumask %s maskset %d\n", cpumask, maskset);
+
+ if (write_file) {
+ fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (fd == -1) {
+ perror("Cannot open output file\n");
+ exit(1);
+ }
+ }
+
+ nl_sd = create_nl_socket(NETLINK_GENERIC);
+ if (nl_sd < 0)
+ err(1, "error creating Netlink socket\n");
+
+ mypid = getpid();
+ id = get_family_id(nl_sd);
+ if (!id) {
+ fprintf(stderr, "Error getting family id, errno %d\n", errno);
+ goto err;
+ }
+ PRINTF("family id %d\n", id);
+
+ if (maskset) {
+ rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
+ TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
+ &cpumask, strlen(cpumask) + 1);
+ PRINTF("Sent register cpumask, retval %d\n", rc);
+ if (rc < 0) {
+ fprintf(stderr, "error sending register cpumask\n");
+ goto err;
+ }
+ }
+
+ do {
+ rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
+ PRINTF("received %d bytes\n", rep_len);
+
+ if (rep_len < 0) {
+ fprintf(stderr, "nonfatal reply error: errno %d\n",
+ errno);
+ continue;
+ }
+ if (msg.n.nlmsg_type == NLMSG_ERROR ||
+ !NLMSG_OK((&msg.n), rep_len)) {
+ struct nlmsgerr *err = NLMSG_DATA(&msg);
+
+ fprintf(stderr, "fatal reply error, errno %d\n",
+ err->error);
+ goto done;
+ }
+
+ PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n",
+ sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len);
+
+
+ rep_len = GENLMSG_PAYLOAD(&msg.n);
+
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ len = 0;
+ while (len < rep_len) {
+ len += NLA_ALIGN(na->nla_len);
+ int mother = na->nla_type;
+
+ PRINTF("mother=%i\n", mother);
+ switch (na->nla_type) {
+ case TASKSTATS_TYPE_AGGR_PID:
+ case TASKSTATS_TYPE_AGGR_TGID:
+ /* For nested attributes, na follows */
+ handle_aggr(mother, na, fd);
+ break;
+ default:
+ fprintf(stderr, "Unexpected nla_type %d\n",
+ na->nla_type);
+ case TASKSTATS_TYPE_NULL:
+ break;
+ }
+ na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
+ }
+ } while (1);
+done:
+ if (maskset) {
+ rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
+ TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
+ &cpumask, strlen(cpumask) + 1);
+ printf("Sent deregister mask, retval %d\n", rc);
+ if (rc < 0)
+ err(rc, "error sending deregister cpumask\n");
+ }
+err:
+ close(nl_sd);
+ if (fd)
+ close(fd);
+ if (cfd)
+ close(cfd);
+ return 0;
+}
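
Note: both main() and handle_aggr() above walk a flat buffer of netlink attributes by repeatedly advancing NLA_ALIGN(nla_len) bytes. The pattern in isolation, as a hedged sketch (handle_one() is a hypothetical callback, not part of the tool):

    #include <linux/netlink.h>

    /* Visit each struct nlattr in a payload of payload_len bytes;
     * every record is padded out to a 4-byte (NLA_ALIGNTO) boundary. */
    static void walk_attrs(void *buf, int payload_len,
                           void (*handle_one)(struct nlattr *))
    {
            struct nlattr *na = buf;
            int off = 0;

            while (off < payload_len) {
                    handle_one(na);
                    off += NLA_ALIGN(na->nla_len);
                    na = (struct nlattr *)((char *)buf + off);
            }
    }
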
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index e09d6908a21d..8aa0d276a636 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -36,7 +36,7 @@
#define MIDR_VARIANT(midr) \
(((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
#define MIDR_IMPLEMENTOR_SHIFT 24
-#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
@@ -118,6 +118,10 @@
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
+#define APPLE_CPU_PART_M1_ICESTORM_PRO 0x024
+#define APPLE_CPU_PART_M1_FIRESTORM_PRO 0x025
+#define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
+#define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
@@ -164,6 +168,10 @@
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
+#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
+#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
+#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
+#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
@@ -172,7 +180,7 @@
#ifndef __ASSEMBLY__
-#include "sysreg.h"
+#include <asm/sysreg.h>
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
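
Note: the 0xff -> 0xffU change is not cosmetic. 0xff << 24 shifts a signed int into its sign bit, which is undefined behaviour in C, and on the usual two's-complement result the mask then sign-extends when widened to 64 bits. A standalone sketch of the difference (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            /* 0xff << 24 overflows signed int; the unsigned form does not. */
            unsigned long long bad  = (unsigned long long)(0xff  << 24);
            unsigned long long good = (unsigned long long)(0xffU << 24);

            printf("signed:   %llx\n", bad);  /* typically ffffffffff000000 */
            printf("unsigned: %llx\n", good); /* ff000000 */
            return 0;
    }
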
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index c1b6ddc02d2f..3bb134355874 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -139,8 +139,10 @@ struct kvm_guest_debug_arch {
__u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
};
+#define KVM_DEBUG_ARCH_HSR_HIGH_VALID (1 << 0)
struct kvm_debug_exit_arch {
__u32 hsr;
+ __u32 hsr_high; /* ESR_EL2[61:32] */
__u64 far; /* used for watchpoints */
};
@@ -332,6 +334,40 @@ struct kvm_arm_copy_mte_tags {
#define KVM_ARM64_SVE_VLS_WORDS \
((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
+/* Bitmap feature firmware registers */
+#define KVM_REG_ARM_FW_FEAT_BMAP (0x0016 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_FEAT_BMAP_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW_FEAT_BMAP | \
+ ((r) & 0xffff))
+
+#define KVM_REG_ARM_STD_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(0)
+
+enum {
+ KVM_REG_ARM_STD_BIT_TRNG_V1_0 = 0,
+#ifdef __KERNEL__
+ KVM_REG_ARM_STD_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_STD_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(1)
+
+enum {
+ KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0,
+#ifdef __KERNEL__
+ KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
+
+enum {
+ KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0,
+ KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
+#ifdef __KERNEL__
+ KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
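
Note: each bitmap firmware register is a 64-bit one-reg whose ID is composed by KVM_REG_ARM_FW_FEAT_BMAP_REG(), and each enumerator above names a bit position within the register's value. A hedged sketch of how a VMM might test a bit after reading the register — read_one_reg() stands in for a KVM_GET_ONE_REG wrapper and is not a real API, and the constants only exist in arm64 headers that carry the hunk above:

    #include <linux/kvm.h>  /* pulls in the asm/kvm.h definitions above */

    /* Hypothetical: non-zero if TRNG v1.0 hypercalls are exposed to the guest. */
    static int guest_has_trng(int vcpu_fd,
                              __u64 (*read_one_reg)(int fd, __u64 reg_id))
    {
            __u64 val = read_one_reg(vcpu_fd, KVM_REG_ARM_STD_BMAP);

            return !!(val & (1ULL << KVM_REG_ARM_STD_BIT_TRNG_V1_0));
    }
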
diff --git a/tools/arch/arm64/include/uapi/asm/perf_regs.h b/tools/arch/arm64/include/uapi/asm/perf_regs.h
index d54daafa89e3..fd157f46727e 100644
--- a/tools/arch/arm64/include/uapi/asm/perf_regs.h
+++ b/tools/arch/arm64/include/uapi/asm/perf_regs.h
@@ -36,6 +36,11 @@ enum perf_event_arm_regs {
PERF_REG_ARM64_LR,
PERF_REG_ARM64_SP,
PERF_REG_ARM64_PC,
- PERF_REG_ARM64_MAX,
+
+ /* Extended/pseudo registers */
+ PERF_REG_ARM64_VG = 46, // SVE Vector Granule
+
+ PERF_REG_ARM64_MAX = PERF_REG_ARM64_PC + 1,
+ PERF_REG_ARM64_EXTENDED_MAX = PERF_REG_ARM64_VG + 1
};
#endif /* _ASM_ARM64_PERF_REGS_H */
diff --git a/tools/arch/h8300/include/asm/bitsperlong.h b/tools/arch/h8300/include/asm/bitsperlong.h
deleted file mode 100644
index fa1508337ffc..000000000000
--- a/tools/arch/h8300/include/asm/bitsperlong.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_H8300_BITS_PER_LONG
-#define __ASM_H8300_BITS_PER_LONG
-
-#include <asm-generic/bitsperlong.h>
-
-#if !defined(__ASSEMBLY__)
-/* h8300-unknown-linux required long */
-#define __kernel_size_t __kernel_size_t
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#endif
-
-#endif /* __ASM_H8300_BITS_PER_LONG */
diff --git a/tools/arch/h8300/include/uapi/asm/mman.h b/tools/arch/h8300/include/uapi/asm/mman.h
deleted file mode 100644
index be7bbe0528d1..000000000000
--- a/tools/arch/h8300/include/uapi/asm/mman.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef TOOLS_ARCH_H8300_UAPI_ASM_MMAN_FIX_H
-#define TOOLS_ARCH_H8300_UAPI_ASM_MMAN_FIX_H
-#include <uapi/asm-generic/mman.h>
-/* MAP_32BIT is undefined on h8300, fix it for perf */
-#define MAP_32BIT 0
-#endif
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 73e643ae94b6..a77b915d36a8 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -201,17 +201,17 @@
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-/* FREE! ( 7*32+10) */
+#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
-/* FREE! ( 7*32+20) */
+#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* AMD Performance Monitoring Version 2 */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
@@ -238,6 +238,7 @@
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */
#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */
+#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* Intel Trust Domain Extensions Guest */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -295,6 +296,13 @@
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -315,6 +323,8 @@
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -405,6 +415,7 @@
#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
/*
@@ -443,5 +454,7 @@
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 1ae0fab7d902..33d2cd04d254 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -50,6 +50,25 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE 0
+#else
+# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+ (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK 0
+#else
+# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET 0
+#else
+# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
+#endif
+
#ifdef CONFIG_INTEL_IOMMU_SVM
# define DISABLE_ENQCMD 0
#else
@@ -62,6 +81,12 @@
# define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31))
#endif
+#ifdef CONFIG_INTEL_TDX_GUEST
+# define DISABLE_TDX_GUEST 0
+#else
+# define DISABLE_TDX_GUEST (1 << (X86_FEATURE_TDX_GUEST & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -73,10 +98,10 @@
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
-#define DISABLED_MASK8 0
+#define DISABLED_MASK8 (DISABLE_TDX_GUEST)
#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
-#define DISABLED_MASK11 0
+#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
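
Note: the new DISABLE_* masks rely on the x86 feature encoding — each X86_FEATURE_* constant is word*32 + bit, so masking with & 31 recovers the bit index within its 32-bit word, and the word number picks the DISABLED_MASKn the bit belongs to. A small self-check of that arithmetic, with the value copied from the cpufeatures.h hunk above:

    #include <assert.h>

    int main(void)
    {
            enum { X86_FEATURE_RETPOLINE = 11*32 + 12 };  /* word 11, bit 12 */

            assert(X86_FEATURE_RETPOLINE / 32 == 11);
            assert((X86_FEATURE_RETPOLINE & 31) == 12);
            /* hence DISABLED_MASK11 carries (1 << 12) when CONFIG_RETPOLINE
             * is not set, letting the kernel compile the check away. */
            return 0;
    }
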
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index ee15311b6be1..cc615be27a54 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -51,6 +51,8 @@
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
+#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@@ -76,6 +78,8 @@
/* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */
#define MSR_IA32_CORE_CAPS 0x000000cf
+#define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2
+#define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS BIT(MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT)
#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT 5
#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT BIT(MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT)
@@ -91,6 +95,7 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO BIT(4) /*
* Not susceptible to Speculative Store Bypass
@@ -114,6 +119,37 @@
* Not susceptible to
* TSX Async Abort (TAA) vulnerabilities.
*/
+#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /*
+ * Not susceptible to SBDR and SSDP
+ * variants of Processor MMIO stale data
+ * vulnerabilities.
+ */
+#define ARCH_CAP_FBSDP_NO BIT(14) /*
+ * Not susceptible to FBSDP variant of
+ * Processor MMIO stale data
+ * vulnerabilities.
+ */
+#define ARCH_CAP_PSDP_NO BIT(15) /*
+ * Not susceptible to PSDP variant of
+ * Processor MMIO stale data
+ * vulnerabilities.
+ */
+#define ARCH_CAP_FB_CLEAR BIT(17) /*
+ * VERW clears CPU fill buffer
+ * even on MDS_NO CPUs.
+ */
+#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /*
+ * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
+ * bit available to control VERW
+ * behavior.
+ */
+#define ARCH_CAP_RRSBA BIT(19) /*
+ * Indicates RET may use predictors
+ * other than the RSB. With eIBRS
+ * enabled predictions in kernel mode
+ * are restricted to targets in
+ * kernel.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -131,6 +167,7 @@
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
#define RTM_ALLOW BIT(1) /* TSX development mode */
+#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
@@ -154,6 +191,11 @@
#define MSR_IA32_POWER_CTL 0x000001fc
#define MSR_IA32_POWER_CTL_BIT_EE 19
+/* Abbreviated from Intel SDM name IA32_INTEGRITY_CAPABILITIES */
+#define MSR_INTEGRITY_CAPS 0x000002d9
+#define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4
+#define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
+
#define MSR_LBR_NHM_FROM 0x00000680
#define MSR_LBR_NHM_TO 0x000006c0
#define MSR_LBR_CORE_FROM 0x00000040
@@ -312,6 +354,7 @@
/* Run Time Average Power Limiting (RAPL) Interface */
+#define MSR_VR_CURRENT_CONFIG 0x00000601
#define MSR_RAPL_POWER_UNIT 0x00000606
#define MSR_PKG_POWER_LIMIT 0x00000610
@@ -502,8 +545,10 @@
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
+#define MSR_AMD64_SEV_SNP_ENABLED_BIT 2
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
+#define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
@@ -524,9 +569,17 @@
#define AMD_CPPC_DES_PERF(x) (((x) & 0xff) << 16)
#define AMD_CPPC_ENERGY_PERF_PREF(x) (((x) & 0xff) << 24)
+/* AMD Performance Counter Global Status and Control MSRs */
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
+#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
+#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
@@ -688,6 +741,10 @@
#define MSR_IA32_PERF_CTL 0x00000199
#define INTEL_PERF_CTL_MASK 0xffff
+/* AMD Branch Sampling configuration */
+#define MSR_AMD_DBG_EXTN_CFG 0xc000010f
+#define MSR_AMD_SAMP_BR_FROM 0xc0010300
+
#define MSR_IA32_MPERF 0x000000e7
#define MSR_IA32_APERF 0x000000e8
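
Note: these ARCH_CAPABILITIES bits are readable from user space through the msr driver, which maps the MSR number to the file offset. A hedged sketch probing the new RRSBA bit (assumes CONFIG_X86_MSR, /dev/cpu/0/msr, root privileges, and a CPU that implements the MSR at all):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t caps = 0;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* MSR_IA32_ARCH_CAPABILITIES is 0x10a; pread() at that offset */
            if (fd < 0 || pread(fd, &caps, sizeof(caps), 0x10a) != sizeof(caps)) {
                    perror("rdmsr");
                    return 1;
            }
            close(fd);
            printf("RRSBA (bit 19): %s\n", caps & (1ULL << 19) ? "yes" : "no");
            return 0;
    }
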
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index bf6e96011dfe..ec53c9fa1da9 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -198,13 +198,13 @@ struct kvm_msrs {
__u32 nmsrs; /* number of msrs in entries */
__u32 pad;
- struct kvm_msr_entry entries[0];
+ struct kvm_msr_entry entries[];
};
/* for KVM_GET_MSR_INDEX_LIST */
struct kvm_msr_list {
__u32 nmsrs; /* number of msrs in entries */
- __u32 indices[0];
+ __u32 indices[];
};
/* Maximum size of any access bitmap in bytes */
@@ -241,7 +241,7 @@ struct kvm_cpuid_entry {
struct kvm_cpuid {
__u32 nent;
__u32 padding;
- struct kvm_cpuid_entry entries[0];
+ struct kvm_cpuid_entry entries[];
};
struct kvm_cpuid_entry2 {
@@ -263,7 +263,7 @@ struct kvm_cpuid_entry2 {
struct kvm_cpuid2 {
__u32 nent;
__u32 padding;
- struct kvm_cpuid_entry2 entries[0];
+ struct kvm_cpuid_entry2 entries[];
};
/* for KVM_GET_PIT and KVM_SET_PIT */
@@ -389,7 +389,7 @@ struct kvm_xsave {
* the contents of CPUID leaf 0xD on the host.
*/
__u32 region[1024];
- __u32 extra[0];
+ __u32 extra[];
};
#define KVM_MAX_XCRS 16
@@ -428,11 +428,12 @@ struct kvm_sync_regs {
struct kvm_vcpu_events events;
};
-#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
-#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
-#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
-#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
-#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
+#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
+#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
@@ -515,7 +516,7 @@ struct kvm_pmu_event_filter {
__u32 fixed_counter_bitmap;
__u32 flags;
__u32 pad[4];
- __u64 events[0];
+ __u64 events[];
};
#define KVM_PMU_EVENT_ALLOW 0
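
Note: the [0] -> [] conversions replace GNU zero-length arrays with C99 flexible array members; callers size their allocations exactly as before, and array-bounds tooling can now treat the trailing array properly. A sketch of the usual allocation pattern for one of these structs:

    #include <stdlib.h>
    #include <linux/kvm.h>

    /* Allocate a kvm_msrs carrying n entries; sizeof(*m) covers only the
     * fixed header once entries[] is a flexible array member. */
    static struct kvm_msrs *alloc_msrs(unsigned int n)
    {
            struct kvm_msrs *m = calloc(1, sizeof(*m) + n * sizeof(m->entries[0]));

            if (m)
                    m->nmsrs = n;
            return m;
    }
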
diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h
index efa969325ede..f69c168391aa 100644
--- a/tools/arch/x86/include/uapi/asm/svm.h
+++ b/tools/arch/x86/include/uapi/asm/svm.h
@@ -108,6 +108,14 @@
#define SVM_VMGEXIT_AP_JUMP_TABLE 0x80000005
#define SVM_VMGEXIT_SET_AP_JUMP_TABLE 0
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
+#define SVM_VMGEXIT_PSC 0x80000010
+#define SVM_VMGEXIT_GUEST_REQUEST 0x80000011
+#define SVM_VMGEXIT_EXT_GUEST_REQUEST 0x80000012
+#define SVM_VMGEXIT_AP_CREATION 0x80000013
+#define SVM_VMGEXIT_AP_CREATE_ON_INIT 0
+#define SVM_VMGEXIT_AP_CREATE 1
+#define SVM_VMGEXIT_AP_DESTROY 2
+#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
/* Exit code reserved for hypervisor/software use */
@@ -218,6 +226,11 @@
{ SVM_VMGEXIT_NMI_COMPLETE, "vmgexit_nmi_complete" }, \
{ SVM_VMGEXIT_AP_HLT_LOOP, "vmgexit_ap_hlt_loop" }, \
{ SVM_VMGEXIT_AP_JUMP_TABLE, "vmgexit_ap_jump_table" }, \
+ { SVM_VMGEXIT_PSC, "vmgexit_page_state_change" }, \
+ { SVM_VMGEXIT_GUEST_REQUEST, "vmgexit_guest_request" }, \
+ { SVM_VMGEXIT_EXT_GUEST_REQUEST, "vmgexit_ext_guest_request" }, \
+ { SVM_VMGEXIT_AP_CREATION, "vmgexit_ap_creation" }, \
+ { SVM_VMGEXIT_HV_FEATURES, "vmgexit_hypervisor_feature" }, \
{ SVM_EXIT_ERR, "invalid_guest_state" }
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index a17e9aa314fd..bd015ec9847b 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -31,11 +31,17 @@ CGROUP COMMANDS
| **bpftool** **cgroup help**
|
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
-| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
-| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
-| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
-| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
-| **sock_release** }
+| *ATTACH_TYPE* := { **cgroup_inet_ingress** | **cgroup_inet_egress** |
+| **cgroup_inet_sock_create** | **cgroup_sock_ops** |
+| **cgroup_device** | **cgroup_inet4_bind** | **cgroup_inet6_bind** |
+| **cgroup_inet4_post_bind** | **cgroup_inet6_post_bind** |
+| **cgroup_inet4_connect** | **cgroup_inet6_connect** |
+| **cgroup_inet4_getpeername** | **cgroup_inet6_getpeername** |
+| **cgroup_inet4_getsockname** | **cgroup_inet6_getsockname** |
+| **cgroup_udp4_sendmsg** | **cgroup_udp6_sendmsg** |
+| **cgroup_udp4_recvmsg** | **cgroup_udp6_recvmsg** |
+| **cgroup_sysctl** | **cgroup_getsockopt** | **cgroup_setsockopt** |
+| **cgroup_inet_sock_release** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index 4ce9a77bc1e0..e44039f89be7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -24,9 +24,11 @@ FEATURE COMMANDS
================
| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**unprivileged**] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature list_builtins** *GROUP*
| **bpftool** **feature help**
|
| *COMPONENT* := { **kernel** | **dev** *NAME* }
+| *GROUP* := { **prog_types** | **map_types** | **attach_types** | **link_types** | **helpers** }
DESCRIPTION
===========
@@ -70,6 +72,16 @@ DESCRIPTION
The keywords **full**, **macros** and **prefix** have the
same role as when probing the kernel.
+ **bpftool feature list_builtins** *GROUP*
+ List items known to bpftool. These can be BPF program types
+ (**prog_types**), BPF map types (**map_types**), attach types
+ (**attach_types**), link types (**link_types**), or BPF helper
+ functions (**helpers**). The command does not probe the system, but
+ simply lists the elements that bpftool knows from compilation time,
+ as provided from libbpf (for all object types) or from the BPF UAPI
+ header (list of helpers). This can be used in scripts to iterate over
+ BPF types or helpers.
+
**bpftool feature help**
Print short help message.
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index a2e9359e554c..eb1b2a254eb1 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -53,8 +53,9 @@ PROG COMMANDS
| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
| }
-| *ATTACH_TYPE* := {
-| **msg_verdict** | **skb_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
+| *ATTACH_TYPE* := {
+| **sk_msg_verdict** | **sk_skb_verdict** | **sk_skb_stream_verdict** |
+| **sk_skb_stream_parser** | **flow_dissector**
| }
| *METRICs* := {
| **cycles** | **instructions** | **l1d_loads** | **llc_misses** |
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index c6d2c77d0252..6b5b3a99f79d 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -53,7 +53,7 @@ $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_
$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) prefix= \
- ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) $@ install_headers
+ ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) AR=$(HOSTAR) $@ install_headers
$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
$(call QUIET_INSTALL, $@)
@@ -93,10 +93,8 @@ INSTALL ?= install
RM ?= rm -f
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args zlib libcap \
- clang-bpf-co-re
-FEATURE_DISPLAY = libbfd disassembler-four-args zlib libcap \
- clang-bpf-co-re
+FEATURE_TESTS = libbfd disassembler-four-args libcap clang-bpf-co-re
+FEATURE_DISPLAY = libbfd disassembler-four-args libcap clang-bpf-co-re
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -204,11 +202,6 @@ $(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
-$(OUTPUT)feature.o:
-ifneq ($(feature-zlib), 1)
- $(error "No zlib found")
-endif
-
$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
$(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 5df8d72c5179..dc1641e3670e 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -407,8 +407,8 @@ _bpftool()
return 0
;;
5)
- local BPFTOOL_PROG_ATTACH_TYPES='msg_verdict \
- skb_verdict stream_verdict stream_parser \
+ local BPFTOOL_PROG_ATTACH_TYPES='sk_msg_verdict \
+ sk_skb_verdict sk_skb_stream_verdict sk_skb_stream_parser \
flow_dissector'
COMPREPLY=( $( compgen -W "$BPFTOOL_PROG_ATTACH_TYPES" -- "$cur" ) )
return 0
@@ -703,15 +703,8 @@ _bpftool()
return 0
;;
type)
- local BPFTOOL_MAP_CREATE_TYPES='hash array \
- prog_array perf_event_array percpu_hash \
- percpu_array stack_trace cgroup_array lru_hash \
- lru_percpu_hash lpm_trie array_of_maps \
- hash_of_maps devmap devmap_hash sockmap cpumap \
- xskmap sockhash cgroup_storage reuseport_sockarray \
- percpu_cgroup_storage queue stack sk_storage \
- struct_ops ringbuf inode_storage task_storage \
- bloom_filter'
+ local BPFTOOL_MAP_CREATE_TYPES="$(bpftool feature list_builtins map_types 2>/dev/null | \
+ grep -v '^unspec$')"
COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) )
return 0
;;
@@ -1039,12 +1032,8 @@ _bpftool()
return 0
;;
attach|detach)
- local BPFTOOL_CGROUP_ATTACH_TYPES='ingress egress \
- sock_create sock_ops device \
- bind4 bind6 post_bind4 post_bind6 connect4 connect6 \
- getpeername4 getpeername6 getsockname4 getsockname6 \
- sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl getsockopt \
- setsockopt sock_release'
+ local BPFTOOL_CGROUP_ATTACH_TYPES="$(bpftool feature list_builtins attach_types 2>/dev/null | \
+ grep '^cgroup_')"
local ATTACH_FLAGS='multi override'
local PROG_TYPE='id pinned tag name'
# Check for $prev = $command first
@@ -1173,9 +1162,14 @@ _bpftool()
_bpftool_once_attr 'full unprivileged'
return 0
;;
+ list_builtins)
+ [[ $prev != "$command" ]] && return 0
+ COMPREPLY=( $( compgen -W 'prog_types map_types \
+ attach_types link_types helpers' -- "$cur" ) )
+ ;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'help list_builtins probe' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index a2c665beda87..0744bd1150be 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -40,6 +40,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
+ [BTF_KIND_ENUM64] = "ENUM64",
};
struct btf_attach_point {
@@ -212,26 +213,76 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
case BTF_KIND_ENUM: {
const struct btf_enum *v = (const void *)(t + 1);
__u16 vlen = BTF_INFO_VLEN(t->info);
+ const char *encoding;
int i;
+ encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
if (json_output) {
+ jsonw_string_field(w, "encoding", encoding);
jsonw_uint_field(w, "size", t->size);
jsonw_uint_field(w, "vlen", vlen);
jsonw_name(w, "values");
jsonw_start_array(w);
} else {
- printf(" size=%u vlen=%u", t->size, vlen);
+ printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, v++) {
+ const char *name = btf_str(btf, v->name_off);
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ if (btf_kflag(t))
+ jsonw_int_field(w, "val", v->val);
+ else
+ jsonw_uint_field(w, "val", v->val);
+ jsonw_end_object(w);
+ } else {
+ if (btf_kflag(t))
+ printf("\n\t'%s' val=%d", name, v->val);
+ else
+ printf("\n\t'%s' val=%u", name, v->val);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_ENUM64: {
+ const struct btf_enum64 *v = btf_enum64(t);
+ __u16 vlen = btf_vlen(t);
+ const char *encoding;
+ int i;
+
+ encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
+ if (json_output) {
+ jsonw_string_field(w, "encoding", encoding);
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "values");
+ jsonw_start_array(w);
+ } else {
+ printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
}
for (i = 0; i < vlen; i++, v++) {
const char *name = btf_str(btf, v->name_off);
+ __u64 val = ((__u64)v->val_hi32 << 32) | v->val_lo32;
if (json_output) {
jsonw_start_object(w);
jsonw_string_field(w, "name", name);
- jsonw_uint_field(w, "val", v->val);
+ if (btf_kflag(t))
+ jsonw_int_field(w, "val", val);
+ else
+ jsonw_uint_field(w, "val", val);
jsonw_end_object(w);
} else {
- printf("\n\t'%s' val=%u", name, v->val);
+ if (btf_kflag(t))
+ printf("\n\t'%s' val=%lldLL", name,
+				       (long long)val);
+ else
+ printf("\n\t'%s' val=%lluULL", name,
+ (unsigned long long)val);
}
}
if (json_output)
@@ -459,6 +510,51 @@ done:
return err;
}
+static const char sysfs_vmlinux[] = "/sys/kernel/btf/vmlinux";
+
+static struct btf *get_vmlinux_btf_from_sysfs(void)
+{
+ struct btf *base;
+
+ base = btf__parse(sysfs_vmlinux, NULL);
+ if (libbpf_get_error(base)) {
+ p_err("failed to parse vmlinux BTF at '%s': %ld\n",
+ sysfs_vmlinux, libbpf_get_error(base));
+ base = NULL;
+ }
+
+ return base;
+}
+
+#define BTF_NAME_BUFF_LEN 64
+
+static bool btf_is_kernel_module(__u32 btf_id)
+{
+ struct bpf_btf_info btf_info = {};
+ char btf_name[BTF_NAME_BUFF_LEN];
+ int btf_fd;
+ __u32 len;
+ int err;
+
+ btf_fd = bpf_btf_get_fd_by_id(btf_id);
+ if (btf_fd < 0) {
+ p_err("can't get BTF object by id (%u): %s", btf_id, strerror(errno));
+ return false;
+ }
+
+ len = sizeof(btf_info);
+ btf_info.name = ptr_to_u64(btf_name);
+ btf_info.name_len = sizeof(btf_name);
+ err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ close(btf_fd);
+ if (err) {
+ p_err("can't get BTF (ID %u) object info: %s", btf_id, strerror(errno));
+ return false;
+ }
+
+ return btf_info.kernel_btf && strncmp(btf_name, "vmlinux", sizeof(btf_name)) != 0;
+}
+
static int do_dump(int argc, char **argv)
{
struct btf *btf = NULL, *base = NULL;
@@ -536,18 +632,11 @@ static int do_dump(int argc, char **argv)
NEXT_ARG();
} else if (is_prefix(src, "file")) {
const char sysfs_prefix[] = "/sys/kernel/btf/";
- const char sysfs_vmlinux[] = "/sys/kernel/btf/vmlinux";
if (!base_btf &&
strncmp(*argv, sysfs_prefix, sizeof(sysfs_prefix) - 1) == 0 &&
- strcmp(*argv, sysfs_vmlinux) != 0) {
- base = btf__parse(sysfs_vmlinux, NULL);
- if (libbpf_get_error(base)) {
- p_err("failed to parse vmlinux BTF at '%s': %ld\n",
- sysfs_vmlinux, libbpf_get_error(base));
- base = NULL;
- }
- }
+ strcmp(*argv, sysfs_vmlinux) != 0)
+ base = get_vmlinux_btf_from_sysfs();
btf = btf__parse_split(*argv, base ?: base_btf);
err = libbpf_get_error(btf);
@@ -591,6 +680,12 @@ static int do_dump(int argc, char **argv)
}
if (!btf) {
+ if (!base_btf && btf_is_kernel_module(btf_id)) {
+ p_info("Warning: valid base BTF was not specified with -B option, falling back to standard base BTF (%s)",
+ sysfs_vmlinux);
+ base_btf = get_vmlinux_btf_from_sysfs();
+ }
+
btf = btf__load_from_kernel_by_id_split(btf_id, base_btf);
err = libbpf_get_error(btf);
if (err) {
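
Note: BTF encodes 64-bit enumerators as two 32-bit halves (val_lo32/val_hi32 in struct btf_enum64), and the kflag on the type decides signed versus unsigned presentation. A self-contained sketch of the reassembly the ENUM64 branch above performs — the struct here mirrors the UAPI layout rather than including it:

    #include <stdint.h>
    #include <stdio.h>

    struct my_btf_enum64 {          /* mirrors struct btf_enum64 */
            uint32_t name_off;
            uint32_t val_lo32;
            uint32_t val_hi32;
    };

    static uint64_t enum64_value(const struct my_btf_enum64 *v)
    {
            return ((uint64_t)v->val_hi32 << 32) | v->val_lo32;
    }

    int main(void)
    {
            struct my_btf_enum64 v = { 0, 0xffffffff, 0x1 };

            printf("%llx\n", (unsigned long long)enum64_value(&v)); /* 1ffffffff */
            return 0;
    }
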
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index f5dddf8ef404..125798b0bc5d 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -182,6 +182,32 @@ static int btf_dumper_enum(const struct btf_dumper *d,
return 0;
}
+static int btf_dumper_enum64(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ const struct btf_enum64 *enums = btf_enum64(t);
+ __u32 val_lo32, val_hi32;
+ __u64 value;
+ __u16 i;
+
+ value = *(__u64 *)data;
+ val_lo32 = (__u32)value;
+ val_hi32 = value >> 32;
+
+ for (i = 0; i < btf_vlen(t); i++) {
+ if (val_lo32 == enums[i].val_lo32 && val_hi32 == enums[i].val_hi32) {
+ jsonw_string(d->jw,
+ btf__name_by_offset(d->btf,
+ enums[i].name_off));
+ return 0;
+ }
+ }
+
+ jsonw_int(d->jw, value);
+ return 0;
+}
+
static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
const char *s)
{
@@ -542,6 +568,8 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
return btf_dumper_array(d, type_id, data);
case BTF_KIND_ENUM:
return btf_dumper_enum(d, t, data);
+ case BTF_KIND_ENUM64:
+ return btf_dumper_enum64(d, t, data);
case BTF_KIND_PTR:
btf_dumper_ptr(d, t, data);
return 0;
@@ -618,6 +646,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
BTF_PRINT_ARG("enum %s ",
btf__name_by_offset(btf, t->name_off));
break;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index effe136119d7..cced668fb2a3 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -15,43 +15,92 @@
#include <unistd.h>
#include <bpf/bpf.h>
+#include <bpf/btf.h>
#include "main.h"
#define HELP_SPEC_ATTACH_FLAGS \
"ATTACH_FLAGS := { multi | override }"
-#define HELP_SPEC_ATTACH_TYPES \
- " ATTACH_TYPE := { ingress | egress | sock_create |\n" \
- " sock_ops | device | bind4 | bind6 |\n" \
- " post_bind4 | post_bind6 | connect4 |\n" \
- " connect6 | getpeername4 | getpeername6 |\n" \
- " getsockname4 | getsockname6 | sendmsg4 |\n" \
- " sendmsg6 | recvmsg4 | recvmsg6 |\n" \
- " sysctl | getsockopt | setsockopt |\n" \
- " sock_release }"
+#define HELP_SPEC_ATTACH_TYPES \
+ " ATTACH_TYPE := { cgroup_inet_ingress | cgroup_inet_egress |\n" \
+ " cgroup_inet_sock_create | cgroup_sock_ops |\n" \
+ " cgroup_device | cgroup_inet4_bind |\n" \
+ " cgroup_inet6_bind | cgroup_inet4_post_bind |\n" \
+ " cgroup_inet6_post_bind | cgroup_inet4_connect |\n" \
+ " cgroup_inet6_connect | cgroup_inet4_getpeername |\n" \
+ " cgroup_inet6_getpeername | cgroup_inet4_getsockname |\n" \
+ " cgroup_inet6_getsockname | cgroup_udp4_sendmsg |\n" \
+ " cgroup_udp6_sendmsg | cgroup_udp4_recvmsg |\n" \
+ " cgroup_udp6_recvmsg | cgroup_sysctl |\n" \
+ " cgroup_getsockopt | cgroup_setsockopt |\n" \
+ " cgroup_inet_sock_release }"
static unsigned int query_flags;
+static struct btf *btf_vmlinux;
+static __u32 btf_vmlinux_id;
static enum bpf_attach_type parse_attach_type(const char *str)
{
+ const char *attach_type_str;
enum bpf_attach_type type;
- for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
- if (attach_type_name[type] &&
- is_prefix(str, attach_type_name[type]))
+ for (type = 0; ; type++) {
+ attach_type_str = libbpf_bpf_attach_type_str(type);
+ if (!attach_type_str)
+ break;
+ if (!strcmp(str, attach_type_str))
+ return type;
+ }
+
+ /* Also check traditionally used attach type strings. For these we keep
+ * allowing prefixed usage.
+ */
+ for (type = 0; ; type++) {
+ attach_type_str = bpf_attach_type_input_str(type);
+ if (!attach_type_str)
+ break;
+ if (is_prefix(str, attach_type_str))
return type;
}
return __MAX_BPF_ATTACH_TYPE;
}
+static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id)
+{
+ struct bpf_btf_info btf_info = {};
+ __u32 btf_len = sizeof(btf_info);
+ char name[16] = {};
+ int err;
+ int fd;
+
+ btf_info.name = ptr_to_u64(name);
+ btf_info.name_len = sizeof(name);
+
+ fd = bpf_btf_get_fd_by_id(attach_btf_obj_id);
+ if (fd < 0)
+ return;
+
+ err = bpf_obj_get_info_by_fd(fd, &btf_info, &btf_len);
+ if (err)
+ goto out;
+
+ if (btf_info.kernel_btf && strncmp(name, "vmlinux", sizeof(name)) == 0)
+ btf_vmlinux_id = btf_info.id;
+
+out:
+ close(fd);
+}
+
static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
const char *attach_flags_str,
int level)
{
char prog_name[MAX_PROG_FULL_NAME];
+ const char *attach_btf_name = NULL;
struct bpf_prog_info info = {};
+ const char *attach_type_str;
__u32 info_len = sizeof(info);
int prog_fd;
@@ -64,26 +113,50 @@ static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
return -1;
}
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+
+ if (btf_vmlinux) {
+ if (!btf_vmlinux_id)
+ guess_vmlinux_btf_id(info.attach_btf_obj_id);
+
+ if (btf_vmlinux_id == info.attach_btf_obj_id &&
+ info.attach_btf_id < btf__type_cnt(btf_vmlinux)) {
+ const struct btf_type *t =
+ btf__type_by_id(btf_vmlinux, info.attach_btf_id);
+ attach_btf_name =
+ btf__name_by_offset(btf_vmlinux, t->name_off);
+ }
+ }
+
get_prog_full_name(&info, prog_fd, prog_name, sizeof(prog_name));
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "id", info.id);
- if (attach_type < ARRAY_SIZE(attach_type_name))
- jsonw_string_field(json_wtr, "attach_type",
- attach_type_name[attach_type]);
+ if (attach_type_str)
+ jsonw_string_field(json_wtr, "attach_type", attach_type_str);
else
jsonw_uint_field(json_wtr, "attach_type", attach_type);
jsonw_string_field(json_wtr, "attach_flags",
attach_flags_str);
jsonw_string_field(json_wtr, "name", prog_name);
+ if (attach_btf_name)
+ jsonw_string_field(json_wtr, "attach_btf_name", attach_btf_name);
+ jsonw_uint_field(json_wtr, "attach_btf_obj_id", info.attach_btf_obj_id);
+ jsonw_uint_field(json_wtr, "attach_btf_id", info.attach_btf_id);
jsonw_end_object(json_wtr);
} else {
printf("%s%-8u ", level ? " " : "", info.id);
- if (attach_type < ARRAY_SIZE(attach_type_name))
- printf("%-15s", attach_type_name[attach_type]);
+ if (attach_type_str)
+ printf("%-15s", attach_type_str);
else
printf("type %-10u", attach_type);
- printf(" %-15s %-15s\n", attach_flags_str, prog_name);
+ printf(" %-15s %-15s", attach_flags_str, prog_name);
+ if (attach_btf_name)
+ printf(" %-15s", attach_btf_name);
+ else if (info.attach_btf_id)
+ printf(" attach_btf_obj_id=%d attach_btf_id=%d",
+ info.attach_btf_obj_id, info.attach_btf_id);
+ printf("\n");
}
close(prog_fd);
@@ -125,40 +198,49 @@ static int cgroup_has_attached_progs(int cgroup_fd)
static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
int level)
{
+ LIBBPF_OPTS(bpf_prog_query_opts, p);
+ __u32 prog_attach_flags[1024] = {0};
const char *attach_flags_str;
__u32 prog_ids[1024] = {0};
- __u32 prog_cnt, iter;
- __u32 attach_flags;
char buf[32];
+ __u32 iter;
int ret;
- prog_cnt = ARRAY_SIZE(prog_ids);
- ret = bpf_prog_query(cgroup_fd, type, query_flags, &attach_flags,
- prog_ids, &prog_cnt);
+ p.query_flags = query_flags;
+ p.prog_cnt = ARRAY_SIZE(prog_ids);
+ p.prog_ids = prog_ids;
+ p.prog_attach_flags = prog_attach_flags;
+
+ ret = bpf_prog_query_opts(cgroup_fd, type, &p);
if (ret)
return ret;
- if (prog_cnt == 0)
+ if (p.prog_cnt == 0)
return 0;
- switch (attach_flags) {
- case BPF_F_ALLOW_MULTI:
- attach_flags_str = "multi";
- break;
- case BPF_F_ALLOW_OVERRIDE:
- attach_flags_str = "override";
- break;
- case 0:
- attach_flags_str = "";
- break;
- default:
- snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
- attach_flags_str = buf;
- }
+ for (iter = 0; iter < p.prog_cnt; iter++) {
+ __u32 attach_flags;
+
+ attach_flags = prog_attach_flags[iter] ?: p.attach_flags;
+
+ switch (attach_flags) {
+ case BPF_F_ALLOW_MULTI:
+ attach_flags_str = "multi";
+ break;
+ case BPF_F_ALLOW_OVERRIDE:
+ attach_flags_str = "override";
+ break;
+ case 0:
+ attach_flags_str = "";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
+ attach_flags_str = buf;
+ }
- for (iter = 0; iter < prog_cnt; iter++)
show_bpf_prog(prog_ids[iter], type,
attach_flags_str, level);
+ }
return 0;
}
@@ -214,6 +296,7 @@ static int do_show(int argc, char **argv)
printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
"AttachFlags", "Name");
+ btf_vmlinux = libbpf_find_kernel_btf();
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
/*
* Not all attach types may be supported, so it's expected,
@@ -277,6 +360,7 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
printf("%s\n", fpath);
}
+ btf_vmlinux = libbpf_find_kernel_btf();
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
show_attached_bpf_progs(cgroup_fd, type, ftw->level);
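
For context, a minimal sketch (not part of the patch) of the bpf_prog_query_opts() pattern the cgroup code migrates to; cgroup_fd and type are assumed to come from the caller:

#include <bpf/bpf.h>

static int count_cgroup_progs(int cgroup_fd, enum bpf_attach_type type)
{
	__u32 prog_ids[64] = {};
	LIBBPF_OPTS(bpf_prog_query_opts, opts,
		.prog_ids = prog_ids,
		.prog_cnt = 64,	/* in: array capacity, out: number of programs */
	);

	if (bpf_prog_query_opts(cgroup_fd, type, &opts))
		return -1;
	return opts.prog_cnt;
}

Unlike the older bpf_prog_query(), the opts variant can also report per-program attach flags through opts.prog_attach_flags, which is what lets the flags column above be correct for multi-attached programs.
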
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 0c1e06cf50b9..067e9ea59e3b 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -13,14 +13,17 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
-#include <linux/limits.h>
-#include <linux/magic.h>
#include <net/if.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>
+#include <linux/filter.h>
+#include <linux/limits.h>
+#include <linux/magic.h>
+#include <linux/unistd.h>
+
#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
@@ -32,52 +35,6 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
-const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
- [BPF_CGROUP_INET_INGRESS] = "ingress",
- [BPF_CGROUP_INET_EGRESS] = "egress",
- [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
- [BPF_CGROUP_INET_SOCK_RELEASE] = "sock_release",
- [BPF_CGROUP_SOCK_OPS] = "sock_ops",
- [BPF_CGROUP_DEVICE] = "device",
- [BPF_CGROUP_INET4_BIND] = "bind4",
- [BPF_CGROUP_INET6_BIND] = "bind6",
- [BPF_CGROUP_INET4_CONNECT] = "connect4",
- [BPF_CGROUP_INET6_CONNECT] = "connect6",
- [BPF_CGROUP_INET4_POST_BIND] = "post_bind4",
- [BPF_CGROUP_INET6_POST_BIND] = "post_bind6",
- [BPF_CGROUP_INET4_GETPEERNAME] = "getpeername4",
- [BPF_CGROUP_INET6_GETPEERNAME] = "getpeername6",
- [BPF_CGROUP_INET4_GETSOCKNAME] = "getsockname4",
- [BPF_CGROUP_INET6_GETSOCKNAME] = "getsockname6",
- [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4",
- [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6",
- [BPF_CGROUP_SYSCTL] = "sysctl",
- [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
- [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
- [BPF_CGROUP_GETSOCKOPT] = "getsockopt",
- [BPF_CGROUP_SETSOCKOPT] = "setsockopt",
- [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
- [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
- [BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
- [BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
- [BPF_LIRC_MODE2] = "lirc_mode2",
- [BPF_FLOW_DISSECTOR] = "flow_dissector",
- [BPF_TRACE_RAW_TP] = "raw_tp",
- [BPF_TRACE_FENTRY] = "fentry",
- [BPF_TRACE_FEXIT] = "fexit",
- [BPF_MODIFY_RETURN] = "mod_ret",
- [BPF_LSM_MAC] = "lsm_mac",
- [BPF_SK_LOOKUP] = "sk_lookup",
- [BPF_TRACE_ITER] = "trace_iter",
- [BPF_XDP_DEVMAP] = "xdp_devmap",
- [BPF_XDP_CPUMAP] = "xdp_cpumap",
- [BPF_XDP] = "xdp",
- [BPF_SK_REUSEPORT_SELECT] = "sk_skb_reuseport_select",
- [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_skb_reuseport_select_or_migrate",
- [BPF_PERF_EVENT] = "perf_event",
- [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
-};
-
void p_err(const char *fmt, ...)
{
va_list ap;
@@ -119,11 +76,73 @@ static bool is_bpffs(char *path)
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}
+/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
+ * memcg-based memory accounting for BPF maps and programs. This was done in
+ * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
+ * accounting'"), in Linux 5.11.
+ *
+ * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
+ * so by checking for the availability of a given BPF helper and this has
+ * failed on some kernels with backports in the past, see commit 6b4384ff1088
+ * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
+ * Instead, we can probe by lowering the process-based rlimit to 0, trying to
+ * load a BPF object, and resetting the rlimit. If the load succeeds then
+ * memcg-based accounting is supported.
+ *
+ * This would be too dangerous to do in the library, because multithreaded
+ * applications might attempt to load items while the rlimit is at 0. Given
+ * that bpftool is single-threaded, this is fine to do here.
+ */
+static bool known_to_need_rlimit(void)
+{
+ struct rlimit rlim_init, rlim_cur_zero = {};
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ size_t insn_cnt = ARRAY_SIZE(insns);
+ union bpf_attr attr;
+ int prog_fd, err;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = insn_cnt;
+ attr.license = ptr_to_u64("GPL");
+
+ if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
+ return false;
+
+	/* Drop the soft limit to zero. Keep the hard limit at its current
+	 * value, because lowering it would be a permanent operation for
+	 * unprivileged users.
+	 */
+ rlim_cur_zero.rlim_max = rlim_init.rlim_max;
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
+ return false;
+
+ /* Do not use bpf_prog_load() from libbpf here, because it calls
+ * bump_rlimit_memlock(), interfering with the current probe.
+ */
+ prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ err = errno;
+
+ /* reset soft rlimit to its initial value */
+ setrlimit(RLIMIT_MEMLOCK, &rlim_init);
+
+ if (prog_fd < 0)
+ return err == EPERM;
+
+ close(prog_fd);
+ return false;
+}
+
void set_max_rlimit(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
- setrlimit(RLIMIT_MEMLOCK, &rinf);
+ if (known_to_need_rlimit())
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
}
static int
@@ -297,6 +316,7 @@ const char *get_fd_type_name(enum bpf_obj_type type)
[BPF_OBJ_UNKNOWN] = "unknown",
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
+ [BPF_OBJ_LINK] = "link",
};
if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
@@ -1017,3 +1037,39 @@ bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx)
{
return k1 == k2;
}
+
+const char *bpf_attach_type_input_str(enum bpf_attach_type t)
+{
+ switch (t) {
+ case BPF_CGROUP_INET_INGRESS: return "ingress";
+ case BPF_CGROUP_INET_EGRESS: return "egress";
+ case BPF_CGROUP_INET_SOCK_CREATE: return "sock_create";
+ case BPF_CGROUP_INET_SOCK_RELEASE: return "sock_release";
+ case BPF_CGROUP_SOCK_OPS: return "sock_ops";
+ case BPF_CGROUP_DEVICE: return "device";
+ case BPF_CGROUP_INET4_BIND: return "bind4";
+ case BPF_CGROUP_INET6_BIND: return "bind6";
+ case BPF_CGROUP_INET4_CONNECT: return "connect4";
+ case BPF_CGROUP_INET6_CONNECT: return "connect6";
+ case BPF_CGROUP_INET4_POST_BIND: return "post_bind4";
+ case BPF_CGROUP_INET6_POST_BIND: return "post_bind6";
+ case BPF_CGROUP_INET4_GETPEERNAME: return "getpeername4";
+ case BPF_CGROUP_INET6_GETPEERNAME: return "getpeername6";
+ case BPF_CGROUP_INET4_GETSOCKNAME: return "getsockname4";
+ case BPF_CGROUP_INET6_GETSOCKNAME: return "getsockname6";
+ case BPF_CGROUP_UDP4_SENDMSG: return "sendmsg4";
+ case BPF_CGROUP_UDP6_SENDMSG: return "sendmsg6";
+ case BPF_CGROUP_SYSCTL: return "sysctl";
+ case BPF_CGROUP_UDP4_RECVMSG: return "recvmsg4";
+ case BPF_CGROUP_UDP6_RECVMSG: return "recvmsg6";
+ case BPF_CGROUP_GETSOCKOPT: return "getsockopt";
+ case BPF_CGROUP_SETSOCKOPT: return "setsockopt";
+ case BPF_TRACE_RAW_TP: return "raw_tp";
+ case BPF_TRACE_FENTRY: return "fentry";
+ case BPF_TRACE_FEXIT: return "fexit";
+ case BPF_MODIFY_RETURN: return "mod_ret";
+ case BPF_SK_REUSEPORT_SELECT: return "sk_skb_reuseport_select";
+ case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: return "sk_skb_reuseport_select_or_migrate";
+ default: return libbpf_bpf_attach_type_str(t);
+ }
+}
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 290998c82de1..7ecabf7947fb 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -548,8 +548,8 @@ static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex)
}
static void
-probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
- const char *define_prefix, __u32 ifindex)
+probe_prog_type(enum bpf_prog_type prog_type, const char *prog_type_str,
+ bool *supported_types, const char *define_prefix, __u32 ifindex)
{
char feat_name[128], plain_desc[128], define_name[128];
const char *plain_comment = "eBPF program_type ";
@@ -567,7 +567,7 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
res = probe_prog_type_ifindex(prog_type, ifindex);
} else {
- res = libbpf_probe_bpf_prog_type(prog_type, NULL);
+ res = libbpf_probe_bpf_prog_type(prog_type, NULL) > 0;
}
#ifdef USE_LIBCAP
@@ -580,20 +580,16 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
supported_types[prog_type] |= res;
- if (!prog_type_name[prog_type]) {
- p_info("program type name not found (type %d)", prog_type);
- return;
- }
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
- if (strlen(prog_type_name[prog_type]) > maxlen) {
+ if (strlen(prog_type_str) > maxlen) {
p_info("program type name too long");
return;
}
- sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
- sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]);
+ sprintf(feat_name, "have_%s_prog_type", prog_type_str);
+ sprintf(define_name, "%s_prog_type", prog_type_str);
uppercase(define_name, sizeof(define_name));
- sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
+ sprintf(plain_desc, "%s%s", plain_comment, prog_type_str);
print_bool_feature(feat_name, plain_desc, define_name, res,
define_prefix);
}
@@ -619,8 +615,8 @@ static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex)
}
static void
-probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
- __u32 ifindex)
+probe_map_type(enum bpf_map_type map_type, const char *map_type_str,
+ const char *define_prefix, __u32 ifindex)
{
char feat_name[128], plain_desc[128], define_name[128];
const char *plain_comment = "eBPF map_type ";
@@ -638,27 +634,23 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
res = probe_map_type_ifindex(map_type, ifindex);
} else {
- res = libbpf_probe_bpf_map_type(map_type, NULL);
+ res = libbpf_probe_bpf_map_type(map_type, NULL) > 0;
}
/* Probe result depends on the success of map creation, no additional
* check required for unprivileged users
*/
- if (!map_type_name[map_type]) {
- p_info("map type name not found (type %d)", map_type);
- return;
- }
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
- if (strlen(map_type_name[map_type]) > maxlen) {
+ if (strlen(map_type_str) > maxlen) {
p_info("map type name too long");
return;
}
- sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
- sprintf(define_name, "%s_map_type", map_type_name[map_type]);
+ sprintf(feat_name, "have_%s_map_type", map_type_str);
+ sprintf(define_name, "%s_map_type", map_type_str);
uppercase(define_name, sizeof(define_name));
- sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_str);
print_bool_feature(feat_name, plain_desc, define_name, res,
define_prefix);
}
@@ -690,7 +682,7 @@ probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type,
return res;
}
-static void
+static bool
probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
const char *define_prefix, unsigned int id,
const char *ptype_name, __u32 ifindex)
@@ -701,7 +693,7 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
if (ifindex)
res = probe_helper_ifindex(id, prog_type, ifindex);
else
- res = libbpf_probe_bpf_helper(prog_type, id, NULL);
+ res = libbpf_probe_bpf_helper(prog_type, id, NULL) > 0;
#ifdef USE_LIBCAP
/* Probe may succeed even if program load fails, for
* unprivileged users check that we did not fail because of
@@ -723,15 +715,18 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
if (res)
printf("\n\t- %s", helper_name[id]);
}
+
+ return res;
}
static void
-probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+probe_helpers_for_progtype(enum bpf_prog_type prog_type,
+ const char *prog_type_str, bool supported_type,
const char *define_prefix, __u32 ifindex)
{
- const char *ptype_name = prog_type_name[prog_type];
char feat_name[128];
unsigned int id;
+ bool probe_res = false;
if (ifindex)
/* Only test helpers for offload-able program types */
@@ -744,12 +739,12 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
}
if (json_output) {
- sprintf(feat_name, "%s_available_helpers", ptype_name);
+ sprintf(feat_name, "%s_available_helpers", prog_type_str);
jsonw_name(json_wtr, feat_name);
jsonw_start_array(json_wtr);
} else if (!define_prefix) {
printf("eBPF helpers supported for program type %s:",
- ptype_name);
+ prog_type_str);
}
for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
@@ -764,16 +759,25 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
continue;
/* fallthrough */
default:
- probe_helper_for_progtype(prog_type, supported_type,
- define_prefix, id, ptype_name,
+ probe_res |= probe_helper_for_progtype(prog_type, supported_type,
+ define_prefix, id, prog_type_str,
ifindex);
}
}
if (json_output)
jsonw_end_array(json_wtr);
- else if (!define_prefix)
+ else if (!define_prefix) {
printf("\n");
+ if (!probe_res) {
+ if (!supported_type)
+ printf("\tProgram type not supported\n");
+ else
+ printf("\tCould not determine which helpers are available\n");
+ }
+ }
}
static void
@@ -931,30 +935,47 @@ static void
section_program_types(bool *supported_types, const char *define_prefix,
__u32 ifindex)
{
- unsigned int i;
+ unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
+ const char *prog_type_str;
print_start_section("program_types",
"Scanning eBPF program types...",
"/*** eBPF program types ***/",
define_prefix);
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++)
- probe_prog_type(i, supported_types, define_prefix, ifindex);
+ while (true) {
+ prog_type++;
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!prog_type_str)
+ break;
+
+ probe_prog_type(prog_type, prog_type_str, supported_types, define_prefix,
+ ifindex);
+ }
print_end_section();
}
static void section_map_types(const char *define_prefix, __u32 ifindex)
{
- unsigned int i;
+ unsigned int map_type = BPF_MAP_TYPE_UNSPEC;
+ const char *map_type_str;
print_start_section("map_types",
"Scanning eBPF map types...",
"/*** eBPF map types ***/",
define_prefix);
- for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
- probe_map_type(i, define_prefix, ifindex);
+ while (true) {
+ map_type++;
+ map_type_str = libbpf_bpf_map_type_str(map_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!map_type_str)
+ break;
+
+ probe_map_type(map_type, map_type_str, define_prefix, ifindex);
+ }
print_end_section();
}
@@ -962,7 +983,8 @@ static void section_map_types(const char *define_prefix, __u32 ifindex)
static void
section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
{
- unsigned int i;
+ unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
+ const char *prog_type_str;
print_start_section("helpers",
"Scanning eBPF helper functions...",
@@ -984,9 +1006,18 @@ section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
" %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
define_prefix, define_prefix, define_prefix,
define_prefix);
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++)
- probe_helpers_for_progtype(i, supported_types[i], define_prefix,
+ while (true) {
+ prog_type++;
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!prog_type_str)
+ break;
+
+ probe_helpers_for_progtype(prog_type, prog_type_str,
+ supported_types[prog_type],
+ define_prefix,
ifindex);
+ }
print_end_section();
}
@@ -1227,6 +1258,58 @@ exit_close_json:
return 0;
}
+static const char *get_helper_name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(helper_name))
+ return NULL;
+
+ return helper_name[id];
+}
+
+static int do_list_builtins(int argc, char **argv)
+{
+ const char *(*get_name)(unsigned int id);
+ unsigned int id = 0;
+
+ if (argc < 1)
+ usage();
+
+ if (is_prefix(*argv, "prog_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_prog_type_str;
+ } else if (is_prefix(*argv, "map_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_map_type_str;
+ } else if (is_prefix(*argv, "attach_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_attach_type_str;
+ } else if (is_prefix(*argv, "link_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_link_type_str;
+ } else if (is_prefix(*argv, "helpers")) {
+ get_name = get_helper_name;
+ } else {
+ p_err("expected 'prog_types', 'map_types', 'attach_types', 'link_types' or 'helpers', got: %s", *argv);
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr); /* root array */
+
+ while (true) {
+ const char *name;
+
+ name = get_name(id++);
+ if (!name)
+ break;
+ if (json_output)
+ jsonw_string(json_wtr, name);
+ else
+ printf("%s\n", name);
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr); /* root array */
+
+ return 0;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1236,9 +1319,11 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %1$s %2$s probe [COMPONENT] [full] [unprivileged] [macros [prefix PREFIX]]\n"
+ " %1$s %2$s list_builtins GROUP\n"
" %1$s %2$s help\n"
"\n"
" COMPONENT := { kernel | dev NAME }\n"
+ " GROUP := { prog_types | map_types | attach_types | link_types | helpers }\n"
" " HELP_SPEC_OPTIONS " }\n"
"",
bin_name, argv[-2]);
@@ -1247,8 +1332,9 @@ static int do_help(int argc, char **argv)
}
static const struct cmd cmds[] = {
- { "probe", do_probe },
- { "help", do_help },
+ { "probe", do_probe },
+ { "list_builtins", do_list_builtins },
+ { "help", do_help },
{ 0 }
};
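
The enumeration idiom used throughout this change also works standalone; a sketch, assuming only a libbpf recent enough to provide libbpf_bpf_prog_type_str():

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	enum bpf_prog_type t;
	const char *name;

	/* walk type values upward until libbpf reports an unknown variant */
	for (t = 0; (name = libbpf_bpf_prog_type_str(t)); t++)
		printf("%d: %s\n", t, name);
	return 0;
}

This is the same list that the new "bpftool feature list_builtins prog_types" command prints.
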
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 7678af364793..7070dcffa822 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -474,6 +474,9 @@ static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
const struct btf_type *sec;
char map_ident[256], var_ident[256];
+ if (!btf)
+ return;
+
codegen("\
\n\
__attribute__((unused)) static void \n\
@@ -549,6 +552,7 @@ static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
break;
case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
else
@@ -999,7 +1003,7 @@ static int do_skeleton(int argc, char **argv)
codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
- /* THIS FILE IS AUTOGENERATED! */ \n\
+ /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
@@ -1015,7 +1019,7 @@ static int do_skeleton(int argc, char **argv)
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
- /* THIS FILE IS AUTOGENERATED! */ \n\
+ /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
@@ -1171,7 +1175,7 @@ static int do_skeleton(int argc, char **argv)
static inline void \n\
%1$s__detach(struct %1$s *obj) \n\
{ \n\
- return bpf_object__detach_skeleton(obj->skeleton); \n\
+ bpf_object__detach_skeleton(obj->skeleton); \n\
} \n\
",
obj_name
@@ -1746,6 +1750,7 @@ btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_poi
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
break;
@@ -1757,6 +1762,7 @@ btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_poi
}
break;
case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
case BTF_KIND_VOLATILE:
case BTF_KIND_TYPEDEF:
err = btfgen_mark_type(info, btf_type->type, follow_pointers);
@@ -1851,6 +1857,112 @@ static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_sp
return 0;
}
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ *
+ * The `behind_ptr` argument is used to stop marking of composite types reached
+ * through a pointer. This way, we can keep BTF size in check while providing
+ * reasonable match semantics.
+ */
+static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
+{
+ const struct btf_type *btf_type;
+ struct btf *btf = info->src_btf;
+ struct btf_type *cloned_type;
+ int i, err;
+
+ if (type_id == 0)
+ return 0;
+
+ btf_type = btf__type_by_id(btf, type_id);
+ /* mark type on cloned BTF as used */
+ cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
+ cloned_type->name_off = MARKED;
+
+ switch (btf_kind(btf_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = btf_members(btf_type);
+ __u16 vlen = btf_vlen(btf_type);
+
+ if (behind_ptr)
+ break;
+
+ for (i = 0; i < vlen; i++, m++) {
+ /* mark member */
+ btfgen_mark_member(info, type_id, i);
+
+ /* mark member's type */
+ err = btfgen_mark_type_match(info, m->type, false);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_CONST:
+ case BTF_KIND_FWD:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
+ case BTF_KIND_PTR:
+ return btfgen_mark_type_match(info, btf_type->type, true);
+ case BTF_KIND_ARRAY: {
+ struct btf_array *array;
+
+ array = btf_array(btf_type);
+ /* mark array type */
+ err = btfgen_mark_type_match(info, array->type, false);
+ /* mark array's index type */
+ err = err ? : btfgen_mark_type_match(info, array->index_type, false);
+ if (err)
+ return err;
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ __u16 vlen = btf_vlen(btf_type);
+ struct btf_param *param;
+
+ /* mark ret type */
+ err = btfgen_mark_type_match(info, btf_type->type, false);
+ if (err)
+ return err;
+
+ /* mark parameters types */
+ param = btf_params(btf_type);
+ for (i = 0; i < vlen; i++) {
+ err = btfgen_mark_type_match(info, param->type, false);
+ if (err)
+ return err;
+ param++;
+ }
+ break;
+ }
+	/* any other kind means this function needs updating to handle it */
+ default:
+ p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Handle a BPF_CORE_TYPE_MATCHES relocation by marking everything reachable
+ * from the target spec's root type via btfgen_mark_type_match().
+ */
+static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
+}
+
static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
{
return btfgen_mark_type(info, targ_spec->root_type_id, true);
@@ -1877,6 +1989,8 @@ static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *r
case BPF_CORE_TYPE_EXISTS:
case BPF_CORE_TYPE_SIZE:
return btfgen_record_type_relo(info, res);
+ case BPF_CORE_TYPE_MATCHES:
+ return btfgen_record_type_match_relo(info, res);
case BPF_CORE_ENUMVAL_EXISTS:
case BPF_CORE_ENUMVAL_VALUE:
return btfgen_record_enumval_relo(info, res);
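
An illustrative example (types invented here) of what the behind_ptr argument achieves for BPF_CORE_TYPE_MATCHES: composite members reached directly are expanded, while a struct reached only through a pointer stays unexpanded, keeping the generated BTF small:

struct inner {
	int a;			/* not expanded: only reachable behind a pointer */
};

struct outer {
	int x;			/* member and its type are marked */
	struct inner *p;	/* the pointer is marked; 'inner' body is not */
};
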
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 97dec81950e5..7a20931c3250 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -13,15 +13,6 @@
#include "json_writer.h"
#include "main.h"
-static const char * const link_type_name[] = {
- [BPF_LINK_TYPE_UNSPEC] = "unspec",
- [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
- [BPF_LINK_TYPE_TRACING] = "tracing",
- [BPF_LINK_TYPE_CGROUP] = "cgroup",
- [BPF_LINK_TYPE_ITER] = "iter",
- [BPF_LINK_TYPE_NETNS] = "netns",
-};
-
static struct hashmap *link_table;
static int link_parse_fd(int *argc, char ***argv)
@@ -63,9 +54,12 @@ static int link_parse_fd(int *argc, char ***argv)
static void
show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
{
+ const char *link_type_str;
+
jsonw_uint_field(wtr, "id", info->id);
- if (info->type < ARRAY_SIZE(link_type_name))
- jsonw_string_field(wtr, "type", link_type_name[info->type]);
+ link_type_str = libbpf_bpf_link_type_str(info->type);
+ if (link_type_str)
+ jsonw_string_field(wtr, "type", link_type_str);
else
jsonw_uint_field(wtr, "type", info->type);
@@ -74,9 +68,11 @@ show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
{
- if (attach_type < ARRAY_SIZE(attach_type_name))
- jsonw_string_field(wtr, "attach_type",
- attach_type_name[attach_type]);
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ if (attach_type_str)
+ jsonw_string_field(wtr, "attach_type", attach_type_str);
else
jsonw_uint_field(wtr, "attach_type", attach_type);
}
@@ -117,6 +113,7 @@ static int get_prog_info(int prog_id, struct bpf_prog_info *info)
static int show_link_close_json(int fd, struct bpf_link_info *info)
{
struct bpf_prog_info prog_info;
+ const char *prog_type_str;
int err;
jsonw_start_object(json_wtr);
@@ -133,12 +130,12 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
if (err)
return err;
- if (prog_info.type < prog_type_name_size)
- jsonw_string_field(json_wtr, "prog_type",
- prog_type_name[prog_info.type]);
+ prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (prog_type_str)
+ jsonw_string_field(json_wtr, "prog_type", prog_type_str);
else
- jsonw_uint_field(json_wtr, "prog_type",
- prog_info.type);
+ jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
show_link_attach_type_json(info->tracing.attach_type,
json_wtr);
@@ -180,9 +177,12 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
static void show_link_header_plain(struct bpf_link_info *info)
{
+ const char *link_type_str;
+
printf("%u: ", info->id);
- if (info->type < ARRAY_SIZE(link_type_name))
- printf("%s ", link_type_name[info->type]);
+ link_type_str = libbpf_bpf_link_type_str(info->type);
+ if (link_type_str)
+ printf("%s ", link_type_str);
else
printf("type %u ", info->type);
@@ -191,8 +191,11 @@ static void show_link_header_plain(struct bpf_link_info *info)
static void show_link_attach_type_plain(__u32 attach_type)
{
- if (attach_type < ARRAY_SIZE(attach_type_name))
- printf("attach_type %s ", attach_type_name[attach_type]);
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ if (attach_type_str)
+ printf("attach_type %s ", attach_type_str);
else
printf("attach_type %u ", attach_type);
}
@@ -210,6 +213,7 @@ static void show_iter_plain(struct bpf_link_info *info)
static int show_link_close_plain(int fd, struct bpf_link_info *info)
{
struct bpf_prog_info prog_info;
+ const char *prog_type_str;
int err;
show_link_header_plain(info);
@@ -224,9 +228,10 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
if (err)
return err;
- if (prog_info.type < prog_type_name_size)
- printf("\n\tprog_type %s ",
- prog_type_name[prog_info.type]);
+ prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (prog_type_str)
+ printf("\n\tprog_type %s ", prog_type_str);
else
printf("\n\tprog_type %u ", prog_info.type);
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index e81227761f5d..451cefc2d0da 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -507,9 +507,7 @@ int main(int argc, char **argv)
* It will still be rejected if users use LIBBPF_STRICT_ALL
* mode for loading generated skeleton.
*/
- ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
- if (ret)
- p_err("failed to enable libbpf strict mode: %d", ret);
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
}
argc -= optind;
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 6e9277ffc68c..5e5060c2ac04 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -63,14 +63,6 @@ static inline void *u64_to_ptr(__u64 ptr)
#define HELP_SPEC_LINK \
"LINK := { id LINK_ID | pinned FILE }"
-extern const char * const prog_type_name[];
-extern const size_t prog_type_name_size;
-
-extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE];
-
-extern const char * const map_type_name[];
-extern const size_t map_type_name_size;
-
/* keep in sync with the definition in skeleton/pid_iter.bpf.c */
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
@@ -251,6 +243,20 @@ int print_all_levels(__maybe_unused enum libbpf_print_level level,
size_t hash_fn_for_key_as_id(const void *key, void *ctx);
bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx);
+/* bpf_attach_type_input_str - convert the provided attach type value into a
+ * textual representation that we accept for input purposes.
+ *
+ * This function is similar in nature to libbpf_bpf_attach_type_str, but
+ * recognizes some attach type names that have been used by the program in the
+ * past and which do not follow the string inference scheme that libbpf uses.
+ * These textual representations should only be used for user input.
+ *
+ * @t: The attach type
+ * Returns a pointer to a static string identifying the attach type. NULL is
+ * returned for unknown bpf_attach_type values.
+ */
+const char *bpf_attach_type_input_str(enum bpf_attach_type t);
+
static inline void *u32_as_hash_field(__u32 x)
{
return (void *)(uintptr_t)x;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index c26378f20831..38b6bc9c26c3 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -22,42 +22,6 @@
#include "json_writer.h"
#include "main.h"
-const char * const map_type_name[] = {
- [BPF_MAP_TYPE_UNSPEC] = "unspec",
- [BPF_MAP_TYPE_HASH] = "hash",
- [BPF_MAP_TYPE_ARRAY] = "array",
- [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
- [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
- [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
- [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
- [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
- [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
- [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
- [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
- [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
- [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
- [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
- [BPF_MAP_TYPE_DEVMAP] = "devmap",
- [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
- [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
- [BPF_MAP_TYPE_CPUMAP] = "cpumap",
- [BPF_MAP_TYPE_XSKMAP] = "xskmap",
- [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
- [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
- [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
- [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
- [BPF_MAP_TYPE_QUEUE] = "queue",
- [BPF_MAP_TYPE_STACK] = "stack",
- [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
- [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
- [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
- [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
- [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
- [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
-};
-
-const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
-
static struct hashmap *map_table;
static bool map_is_per_cpu(__u32 type)
@@ -81,12 +45,18 @@ static bool map_is_map_of_progs(__u32 type)
static int map_type_from_str(const char *type)
{
+ const char *map_type_str;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(map_type_name); i++)
+ for (i = 0; ; i++) {
+ map_type_str = libbpf_bpf_map_type_str(i);
+ if (!map_type_str)
+ break;
+
/* Don't allow prefixing in case of possible future shadowing */
- if (map_type_name[i] && !strcmp(map_type_name[i], type))
+ if (!strcmp(map_type_str, type))
return i;
+ }
return -1;
}
@@ -472,9 +442,12 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
static void show_map_header_json(struct bpf_map_info *info, json_writer_t *wtr)
{
+ const char *map_type_str;
+
jsonw_uint_field(wtr, "id", info->id);
- if (info->type < ARRAY_SIZE(map_type_name))
- jsonw_string_field(wtr, "type", map_type_name[info->type]);
+ map_type_str = libbpf_bpf_map_type_str(info->type);
+ if (map_type_str)
+ jsonw_string_field(wtr, "type", map_type_str);
else
jsonw_uint_field(wtr, "type", info->type);
@@ -513,10 +486,12 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
+ const char *prog_type_str;
- if (prog_type < prog_type_name_size)
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ if (prog_type_str)
jsonw_string_field(json_wtr, "owner_prog_type",
- prog_type_name[prog_type]);
+ prog_type_str);
else
jsonw_uint_field(json_wtr, "owner_prog_type",
prog_type);
@@ -559,9 +534,13 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
static void show_map_header_plain(struct bpf_map_info *info)
{
+ const char *map_type_str;
+
printf("%u: ", info->id);
- if (info->type < ARRAY_SIZE(map_type_name))
- printf("%s ", map_type_name[info->type]);
+
+ map_type_str = libbpf_bpf_map_type_str(info->type);
+ if (map_type_str)
+ printf("%s ", map_type_str);
else
printf("type %u ", info->type);
@@ -597,10 +576,11 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
printf("\n\t");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
+ const char *prog_type_str;
- if (prog_type < prog_type_name_size)
- printf("owner_prog_type %s ",
- prog_type_name[prog_type]);
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ if (prog_type_str)
+ printf("owner_prog_type %s ", prog_type_str);
else
printf("owner_prog_type %d ", prog_type);
}
@@ -876,9 +856,13 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
}
if (info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
- info->value_size != 8)
+ info->value_size != 8) {
+ const char *map_type_str;
+
+ map_type_str = libbpf_bpf_map_type_str(info->type);
p_info("Warning: cannot read values from %s map with value_size != 8",
- map_type_name[info->type]);
+ map_type_str);
+ }
while (true) {
err = bpf_map_get_next_key(fd, prev_key, key);
if (err) {
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index 50de087b0db7..226ec2c39052 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -11,7 +11,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <ftw.h>
+#include <dirent.h>
#include <bpf/bpf.h>
@@ -147,81 +147,83 @@ static void print_perf_plain(int pid, int fd, __u32 prog_id, __u32 fd_type,
}
}
-static int show_proc(const char *fpath, const struct stat *sb,
- int tflag, struct FTW *ftwbuf)
+static int show_proc(void)
{
+ struct dirent *proc_de, *pid_fd_de;
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
- int err, pid = 0, fd = 0;
+ DIR *proc, *pid_fd;
+ int err, pid, fd;
const char *pch;
char buf[4096];
- /* prefix always /proc */
- pch = fpath + 5;
- if (*pch == '\0')
- return 0;
+ proc = opendir("/proc");
+ if (!proc)
+ return -1;
- /* pid should be all numbers */
- pch++;
- while (isdigit(*pch)) {
- pid = pid * 10 + *pch - '0';
- pch++;
- }
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd directory */
- pch++;
- if (strncmp(pch, "fd", 2))
- return FTW_SKIP_SUBTREE;
- pch += 2;
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd/<fd_num> */
- pch++;
- while (isdigit(*pch)) {
- fd = fd * 10 + *pch - '0';
- pch++;
- }
- if (*pch != '\0')
- return FTW_SKIP_SUBTREE;
+ while ((proc_de = readdir(proc))) {
+ pid = 0;
+ pch = proc_de->d_name;
- /* query (pid, fd) for potential perf events */
- len = sizeof(buf);
- err = bpf_task_fd_query(pid, fd, 0, buf, &len, &prog_id, &fd_type,
- &probe_offset, &probe_addr);
- if (err < 0)
- return 0;
+ /* pid should be all numbers */
+ while (isdigit(*pch)) {
+ pid = pid * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
- if (json_output)
- print_perf_json(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
- else
- print_perf_plain(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
+ err = snprintf(buf, sizeof(buf), "/proc/%s/fd", proc_de->d_name);
+ if (err < 0 || err >= (int)sizeof(buf))
+ continue;
+
+ pid_fd = opendir(buf);
+ if (!pid_fd)
+ continue;
+ while ((pid_fd_de = readdir(pid_fd))) {
+ fd = 0;
+ pch = pid_fd_de->d_name;
+
+ /* fd should be all numbers */
+ while (isdigit(*pch)) {
+ fd = fd * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
+
+ /* query (pid, fd) for potential perf events */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(pid, fd, 0, buf, &len,
+ &prog_id, &fd_type,
+ &probe_offset, &probe_addr);
+ if (err < 0)
+ continue;
+
+ if (json_output)
+ print_perf_json(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ else
+ print_perf_plain(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ }
+ closedir(pid_fd);
+ }
+ closedir(proc);
return 0;
}
static int do_show(int argc, char **argv)
{
- int flags = FTW_ACTIONRETVAL | FTW_PHYS;
- int err = 0, nopenfd = 16;
+ int err;
if (!has_perf_query_support())
return -1;
if (json_output)
jsonw_start_array(json_wtr);
- if (nftw("/proc", show_proc, nopenfd, flags) == -1) {
- p_err("%s", strerror(errno));
- err = -1;
- }
+ err = show_proc();
if (json_output)
jsonw_end_array(json_wtr);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index bc4e05542c2b..c81362a001ba 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -36,53 +36,28 @@
#define BPF_METADATA_PREFIX "bpf_metadata_"
#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
-const char * const prog_type_name[] = {
- [BPF_PROG_TYPE_UNSPEC] = "unspec",
- [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
- [BPF_PROG_TYPE_KPROBE] = "kprobe",
- [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
- [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
- [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
- [BPF_PROG_TYPE_XDP] = "xdp",
- [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
- [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
- [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
- [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
- [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
- [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
- [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
- [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
- [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
- [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
- [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
- [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
- [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
- [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
- [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
- [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
- [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
- [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
- [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
- [BPF_PROG_TYPE_TRACING] = "tracing",
- [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
- [BPF_PROG_TYPE_EXT] = "ext",
- [BPF_PROG_TYPE_LSM] = "lsm",
- [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
-};
-
-const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
-
enum dump_mode {
DUMP_JITED,
DUMP_XLATED,
};
+static const bool attach_types[] = {
+ [BPF_SK_SKB_STREAM_PARSER] = true,
+ [BPF_SK_SKB_STREAM_VERDICT] = true,
+ [BPF_SK_SKB_VERDICT] = true,
+ [BPF_SK_MSG_VERDICT] = true,
+ [BPF_FLOW_DISSECTOR] = true,
+ [__MAX_BPF_ATTACH_TYPE] = false,
+};
+
+/* Textual representations traditionally used by the program and kept around
+ * for the sake of backwards compatibility.
+ */
static const char * const attach_type_strings[] = {
[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
[BPF_SK_SKB_VERDICT] = "skb_verdict",
[BPF_SK_MSG_VERDICT] = "msg_verdict",
- [BPF_FLOW_DISSECTOR] = "flow_dissector",
[__MAX_BPF_ATTACH_TYPE] = NULL,
};
@@ -93,6 +68,14 @@ static enum bpf_attach_type parse_attach_type(const char *str)
enum bpf_attach_type type;
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ if (attach_types[type]) {
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(type);
+ if (!strcmp(str, attach_type_str))
+ return type;
+ }
+
if (attach_type_strings[type] &&
is_prefix(str, attach_type_strings[type]))
return type;
@@ -427,12 +410,14 @@ out_free:
static void print_prog_header_json(struct bpf_prog_info *info, int fd)
{
+ const char *prog_type_str;
char prog_name[MAX_PROG_FULL_NAME];
jsonw_uint_field(json_wtr, "id", info->id);
- if (info->type < ARRAY_SIZE(prog_type_name))
- jsonw_string_field(json_wtr, "type",
- prog_type_name[info->type]);
+ prog_type_str = libbpf_bpf_prog_type_str(info->type);
+
+ if (prog_type_str)
+ jsonw_string_field(json_wtr, "type", prog_type_str);
else
jsonw_uint_field(json_wtr, "type", info->type);
@@ -514,11 +499,13 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
{
+ const char *prog_type_str;
char prog_name[MAX_PROG_FULL_NAME];
printf("%u: ", info->id);
- if (info->type < ARRAY_SIZE(prog_type_name))
- printf("%s ", prog_type_name[info->type]);
+ prog_type_str = libbpf_bpf_prog_type_str(info->type);
+ if (prog_type_str)
+ printf("%s ", prog_type_str);
else
printf("type %u ", info->type);
@@ -1975,7 +1962,7 @@ static int profile_parse_metrics(int argc, char **argv)
int selected_cnt = 0;
unsigned int i;
- metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
+ metric_cnt = ARRAY_SIZE(metrics);
while (argc > 0) {
for (i = 0; i < metric_cnt; i++) {
@@ -2376,8 +2363,8 @@ static int do_help(int argc, char **argv)
" cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
" cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
" struct_ops | fentry | fexit | freplace | sk_lookup }\n"
- " ATTACH_TYPE := { msg_verdict | skb_verdict | stream_verdict |\n"
- " stream_parser | flow_dissector }\n"
+ " ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
+ " sk_skb_stream_parser | flow_dissector }\n"
" METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
index e80a5c79b38f..bf1f02212797 100644
--- a/tools/bpf/bpftool/tracelog.c
+++ b/tools/bpf/bpftool/tracelog.c
@@ -9,7 +9,7 @@
#include <string.h>
#include <unistd.h>
#include <linux/magic.h>
-#include <sys/fcntl.h>
+#include <fcntl.h>
#include <sys/vfs.h>
#include "main.h"
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 5d26f3c6f918..80cd7843c677 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -45,6 +45,19 @@
* .zero 4
* __BTF_ID__func__vfs_fallocate__4:
* .zero 4
+ *
+ * set8 - store symbol size into first 4 bytes and sort following
+ * ID list
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__func__vfs_getattr__3:
+ * .zero 4
+ * .word (1 << 0) | (1 << 2)
+ * __BTF_ID__func__vfs_fallocate__5:
+ * .zero 4
+ * .word (1 << 3) | (1 << 1) | (1 << 2)
*/
#define _GNU_SOURCE
@@ -72,6 +85,7 @@
#define BTF_TYPEDEF "typedef"
#define BTF_FUNC "func"
#define BTF_SET "set"
+#define BTF_SET8 "set8"
#define ADDR_CNT 100
@@ -84,6 +98,7 @@ struct btf_id {
};
int addr_cnt;
bool is_set;
+ bool is_set8;
Elf64_Addr addr[ADDR_CNT];
};
@@ -231,14 +246,14 @@ static char *get_id(const char *prefix_end)
return id;
}
-static struct btf_id *add_set(struct object *obj, char *name)
+static struct btf_id *add_set(struct object *obj, char *name, bool is_set8)
{
/*
* __BTF_ID__set__name
* name = ^
* id = ^
*/
- char *id = name + sizeof(BTF_SET "__") - 1;
+ char *id = name + (is_set8 ? sizeof(BTF_SET8 "__") : sizeof(BTF_SET "__")) - 1;
int len = strlen(name);
if (id >= name + len) {
@@ -444,9 +459,21 @@ static int symbols_collect(struct object *obj)
} else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) {
obj->nr_funcs++;
id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1);
+ /* set8 */
+ } else if (!strncmp(prefix, BTF_SET8, sizeof(BTF_SET8) - 1)) {
+ id = add_set(obj, prefix, true);
+			/*
+			 * SET8 objects encode the list's count in the
+			 * symbol's size (in 8-byte units), which also
+			 * covers the leading 'cnt'/'flags' word, hence
+			 * the - 1.
+			 */
+ if (id) {
+ id->cnt = sym.st_size / sizeof(uint64_t) - 1;
+ id->is_set8 = true;
+ }
/* set */
} else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
- id = add_set(obj, prefix);
+ id = add_set(obj, prefix, false);
/*
* SET objects store list's count, which is encoded
* in symbol's size, together with 'cnt' field hence
@@ -571,7 +598,8 @@ static int id_patch(struct object *obj, struct btf_id *id)
int *ptr = data->d_buf;
int i;
- if (!id->id && !id->is_set)
+ /* For set, set8, id->id may be 0 */
+ if (!id->id && !id->is_set && !id->is_set8)
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
for (i = 0; i < id->addr_cnt; i++) {
@@ -643,13 +671,13 @@ static int sets_patch(struct object *obj)
}
idx = idx / sizeof(int);
- base = &ptr[idx] + 1;
+ base = &ptr[idx] + (id->is_set8 ? 2 : 1);
cnt = ptr[idx];
pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
(idx + 1) * sizeof(int), cnt, id->name);
- qsort(base, cnt, sizeof(int), cmp_id);
+ qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
next = rb_next(next);
}
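
The 8-byte records sorted above correspond to the kernel's struct btf_id_set8 (include/linux/btf_ids.h), sketched here with plain C types: the leading count and flags words explain the "+ 2" base offset, and sorting the (id, flags) pairs by id is why qsort() now compares 64-bit units:

struct btf_id_set8 {
	unsigned int cnt;	/* u32 in the kernel */
	unsigned int flags;
	struct {
		unsigned int id;	/* cmp_id() keys on this field */
		unsigned int flags;
	} pairs[];
};
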
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index da6de16a3dfb..8b3d87b82b7a 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -4,7 +4,7 @@ include ../../scripts/Makefile.include
OUTPUT ?= $(abspath .output)/
BPFTOOL_OUTPUT := $(OUTPUT)bpftool/
-DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)bpftool
+DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)bootstrap/bpftool
BPFTOOL ?= $(DEFAULT_BPFTOOL)
LIBBPF_SRC := $(abspath ../../lib/bpf)
BPFOBJ_OUTPUT := $(OUTPUT)libbpf/
@@ -86,6 +86,5 @@ $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OU
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \
DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers
-$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
- ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD)
+$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) bootstrap
diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c
index d78f4148597f..83c5993a139a 100644
--- a/tools/bpf/runqslower/runqslower.c
+++ b/tools/bpf/runqslower/runqslower.c
@@ -4,7 +4,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/resource.h>
#include <time.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
@@ -88,16 +87,6 @@ int libbpf_print_fn(enum libbpf_print_level level,
return vfprintf(stderr, format, args);
}
-static int bump_memlock_rlimit(void)
-{
- struct rlimit rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- return setrlimit(RLIMIT_MEMLOCK, &rlim_new);
-}
-
void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
const struct runq_event *e = data;
@@ -133,11 +122,8 @@ int main(int argc, char **argv)
libbpf_set_print(libbpf_print_fn);
- err = bump_memlock_rlimit();
- if (err) {
- fprintf(stderr, "failed to increase rlimit: %d", err);
- return 1;
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
obj = runqslower_bpf__open();
if (!obj) {
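
The manual memlock bump can go away because libbpf takes over: LIBBPF_STRICT_ALL includes LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK, so in 1.0 API mode libbpf raises RLIMIT_MEMLOCK by itself on kernels that still account BPF memory against memlock. A minimal sketch:

#include <bpf/libbpf.h>

int init_libbpf(void)
{
	/* returns 0 on success; makes a manual setrlimit(RLIMIT_MEMLOCK) redundant */
	return libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
}
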
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index c6a48d0ef9ff..888a0421d43b 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -99,6 +99,10 @@ FEATURE_TESTS_EXTRA := \
clang \
libbpf \
libbpf-btf__load_from_kernel_by_id \
+ libbpf-bpf_prog_load \
+ libbpf-bpf_object__next_program \
+ libbpf-bpf_object__next_map \
+	libbpf-bpf_map_create \
libpfm4 \
libdebuginfod \
clang-bpf-co-re
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index cb4a2a4fa2e4..7c2a17e23c30 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -58,6 +58,11 @@ FILES= \
test-bpf.bin \
test-libbpf.bin \
test-libbpf-btf__load_from_kernel_by_id.bin \
+ test-libbpf-bpf_prog_load.bin \
+ test-libbpf-bpf_map_create.bin \
+ test-libbpf-bpf_object__next_program.bin \
+ test-libbpf-bpf_object__next_map.bin \
+ test-libbpf-btf__raw_data.bin \
test-get_cpuid.bin \
test-sdt.bin \
test-cxx.bin \
@@ -291,6 +296,21 @@ $(OUTPUT)test-libbpf.bin:
$(OUTPUT)test-libbpf-btf__load_from_kernel_by_id.bin:
$(BUILD) -lbpf
+$(OUTPUT)test-libbpf-bpf_prog_load.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_map_create.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_object__next_program.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_object__next_map.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-btf__raw_data.bin:
+ $(BUILD) -lbpf
+
$(OUTPUT)test-sdt.bin:
$(BUILD)
diff --git a/tools/build/feature/test-libbpf-bpf_map_create.c b/tools/build/feature/test-libbpf-bpf_map_create.c
new file mode 100644
index 000000000000..b9f550e332c8
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_map_create.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/bpf.h>
+
+int main(void)
+{
+	return bpf_map_create(0 /* map_type */, NULL /* map_name */, 0 /* key_size */,
+ 0 /* value_size */, 0 /* max_entries */, NULL /* opts */);
+}
diff --git a/tools/build/feature/test-libbpf-bpf_object__next_map.c b/tools/build/feature/test-libbpf-bpf_object__next_map.c
new file mode 100644
index 000000000000..64adb519e97e
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_object__next_map.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/libbpf.h>
+
+int main(void)
+{
+ bpf_object__next_map(NULL /* obj */, NULL /* prev */);
+ return 0;
+}
diff --git a/tools/build/feature/test-libbpf-bpf_object__next_program.c b/tools/build/feature/test-libbpf-bpf_object__next_program.c
new file mode 100644
index 000000000000..8bf4fd26b545
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_object__next_program.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/libbpf.h>
+
+int main(void)
+{
+ bpf_object__next_program(NULL /* obj */, NULL /* prev */);
+ return 0;
+}
diff --git a/tools/build/feature/test-libbpf-bpf_prog_load.c b/tools/build/feature/test-libbpf-bpf_prog_load.c
new file mode 100644
index 000000000000..47f516d63ebc
--- /dev/null
+++ b/tools/build/feature/test-libbpf-bpf_prog_load.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/bpf.h>
+
+int main(void)
+{
+ return bpf_prog_load(0 /* prog_type */, NULL /* prog_name */,
+ NULL /* license */, NULL /* insns */,
+ 0 /* insn_cnt */, NULL /* opts */);
+}
diff --git a/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
index f7c084428735..a17647f7d5a4 100644
--- a/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
+++ b/tools/build/feature/test-libbpf-btf__load_from_kernel_by_id.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include <bpf/libbpf.h>
+#include <bpf/btf.h>
int main(void)
{
- return btf__load_from_kernel_by_id(20151128, NULL);
+ btf__load_from_kernel_by_id(20151128);
+ return 0;
}
diff --git a/tools/build/feature/test-libbpf-btf__raw_data.c b/tools/build/feature/test-libbpf-btf__raw_data.c
new file mode 100644
index 000000000000..57da31dd7581
--- /dev/null
+++ b/tools/build/feature/test-libbpf-btf__raw_data.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bpf/btf.h>
+
+int main(void)
+{
+ btf__raw_data(NULL /* btf_ro */, NULL /* size */);
+ return 0;
+}
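The last two probes cover the BTF side of the libbpf 1.0 surface. A small sketch (not from the patch) of how the probed APIs pair up at runtime; BTF object ID 1 is usually vmlinux, but that is an assumption:

#include <bpf/btf.h>
#include <stdio.h>

int main(void)
{
	struct btf *btf = btf__load_from_kernel_by_id(1);
	const void *raw;
	__u32 size;

	if (!btf)
		return 1;

	raw = btf__raw_data(btf, &size);	/* read-only view of the blob */
	printf("BTF blob: %u bytes at %p\n", size, raw);
	btf__free(btf);
	return 0;
}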
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index a2b233fdb572..6c122952c589 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -149,6 +149,7 @@ void print_usage(void)
" -r Listen for rising edges\n"
" -f Listen for falling edges\n"
" -w Report the wall-clock time for events\n"
+ " -t Report the hardware timestamp for events\n"
" -b <n> Debounce the line with period n microseconds\n"
" [-c <n>] Do <n> loops (optional, infinite loop if not stated)\n"
" -? This helptext\n"
@@ -174,7 +175,7 @@ int main(int argc, char **argv)
memset(&config, 0, sizeof(config));
config.flags = GPIO_V2_LINE_FLAG_INPUT;
- while ((c = getopt(argc, argv, "c:n:o:b:dsrfw?")) != -1) {
+ while ((c = getopt(argc, argv, "c:n:o:b:dsrfwt?")) != -1) {
switch (c) {
case 'c':
loops = strtoul(optarg, NULL, 10);
@@ -208,6 +209,9 @@ int main(int argc, char **argv)
case 'w':
config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
break;
+ case 't':
+ config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
+ break;
case '?':
print_usage();
return -1;
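The new -t option selects the hardware timestamp engine (HTE) as the line's event clock, so event timestamps come from dedicated timestamping hardware rather than the monotonic or realtime clock. A condensed sketch of the underlying uAPI usage (chip path and consumer name are placeholders; needs HTE support in both the kernel and the GPIO provider):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gpio.h>

int watch_line_hte(const char *chip_path, unsigned int offset)
{
	struct gpio_v2_line_request req;
	struct gpio_v2_line_event event;
	int cfd, ret;

	memset(&req, 0, sizeof(req));
	req.offsets[0] = offset;
	req.num_lines = 1;
	req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
			   GPIO_V2_LINE_FLAG_EDGE_RISING |
			   GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
	strcpy(req.consumer, "hte-demo");

	cfd = open(chip_path, O_RDONLY);
	if (cfd < 0)
		return -1;
	ret = ioctl(cfd, GPIO_V2_GET_LINE_IOCTL, &req);
	close(cfd);
	if (ret < 0)
		return -1;

	/* timestamp_ns is now sourced from the hardware timestamp engine */
	if (read(req.fd, &event, sizeof(event)) == sizeof(event))
		printf("edge at %llu ns (HTE clock)\n",
		       (unsigned long long)event.timestamp_ns);
	close(req.fd);
	return 0;
}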
diff --git a/tools/include/linux/arm-smccc.h b/tools/include/linux/arm-smccc.h
new file mode 100644
index 000000000000..63ce9bebccd3
--- /dev/null
+++ b/tools/include/linux/arm-smccc.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, Linaro Limited
+ */
+#ifndef __LINUX_ARM_SMCCC_H
+#define __LINUX_ARM_SMCCC_H
+
+#include <linux/const.h>
+
+/*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+ * https://developer.arm.com/docs/den0028/latest
+ *
+ * This code is up-to-date with version DEN 0028 C
+ */
+
+#define ARM_SMCCC_STD_CALL _AC(0,U)
+#define ARM_SMCCC_FAST_CALL _AC(1,U)
+#define ARM_SMCCC_TYPE_SHIFT 31
+
+#define ARM_SMCCC_SMC_32 0
+#define ARM_SMCCC_SMC_64 1
+#define ARM_SMCCC_CALL_CONV_SHIFT 30
+
+#define ARM_SMCCC_OWNER_MASK 0x3F
+#define ARM_SMCCC_OWNER_SHIFT 24
+
+#define ARM_SMCCC_FUNC_MASK 0xFFFF
+
+#define ARM_SMCCC_IS_FAST_CALL(smc_val) \
+ ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
+#define ARM_SMCCC_IS_64(smc_val) \
+ ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
+#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK)
+#define ARM_SMCCC_OWNER_NUM(smc_val) \
+ (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)
+
+#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
+ (((type) << ARM_SMCCC_TYPE_SHIFT) | \
+ ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
+ (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
+ ((func_num) & ARM_SMCCC_FUNC_MASK))
+
+#define ARM_SMCCC_OWNER_ARCH 0
+#define ARM_SMCCC_OWNER_CPU 1
+#define ARM_SMCCC_OWNER_SIP 2
+#define ARM_SMCCC_OWNER_OEM 3
+#define ARM_SMCCC_OWNER_STANDARD 4
+#define ARM_SMCCC_OWNER_STANDARD_HYP 5
+#define ARM_SMCCC_OWNER_VENDOR_HYP 6
+#define ARM_SMCCC_OWNER_TRUSTED_APP 48
+#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
+#define ARM_SMCCC_OWNER_TRUSTED_OS 50
+#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+
+#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01
+
+#define ARM_SMCCC_QUIRK_NONE 0
+#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+
+#define ARM_SMCCC_VERSION_1_0 0x10000
+#define ARM_SMCCC_VERSION_1_1 0x10001
+#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+
+#define ARM_SMCCC_VERSION_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
+#define ARM_SMCCC_ARCH_SOC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 2)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_2 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x7fff)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
+#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_FUNC_QUERY_CALL_UID)
+
+/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+
+/* KVM "vendor specific" services */
+#define ARM_SMCCC_KVM_FUNC_FEATURES 0
+#define ARM_SMCCC_KVM_FUNC_PTP 1
+#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
+#define ARM_SMCCC_KVM_NUM_FUNCS 128
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_FEATURES)
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
+
+/*
+ * ptp_kvm is a feature used for time synchronization between guest
+ * and host. The ptp_kvm module in the guest kernel obtains this
+ * service from the host using this hypercall ID.
+ */
+#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_PTP)
+
+/* ptp_kvm counter type ID */
+#define KVM_PTP_VIRT_COUNTER 0
+#define KVM_PTP_PHYS_COUNTER 1
+
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
+/* TRNG entropy source calls (defined by ARM DEN0098) */
+#define ARM_SMCCC_TRNG_VERSION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x50)
+
+#define ARM_SMCCC_TRNG_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x51)
+
+#define ARM_SMCCC_TRNG_GET_UUID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x52)
+
+#define ARM_SMCCC_TRNG_RND32 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+#define ARM_SMCCC_TRNG_RND64 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+/*
+ * Return codes defined in ARM DEN 0070A
+ * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
+ */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+#define SMCCC_RET_INVALID_PARAMETER -3
+
+#endif /*__LINUX_ARM_SMCCC_H*/
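For orientation, this illustrative snippet (assuming the header above is on the include path) round-trips a function ID through the CALL_VAL and decode macros:

#include <linux/arm-smccc.h>
#include <stdio.h>

int main(void)
{
	/* SMCCC_VERSION: fast call, 32-bit convention, arch owner, func 0 */
	unsigned int id = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
					     ARM_SMCCC_SMC_32,
					     ARM_SMCCC_OWNER_ARCH, 0);

	/* prints: id=0x80000000 fast=1 64bit=0 owner=0 func=0x0 */
	printf("id=0x%08x fast=%d 64bit=%d owner=%u func=0x%x\n", id,
	       !!ARM_SMCCC_IS_FAST_CALL(id), !!ARM_SMCCC_IS_64(id),
	       (unsigned int)ARM_SMCCC_OWNER_NUM(id),
	       (unsigned int)ARM_SMCCC_FUNC_NUM(id));
	return 0;
}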
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index ea97804d04d4..afdf93bebaaf 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -16,11 +16,11 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
void bitmap_clear(unsigned long *map, unsigned int start, int len);
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -162,8 +162,8 @@ static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -173,8 +173,9 @@ static inline int bitmap_equal(const unsigned long *src1,
return __bitmap_equal(src1, src2, nbits);
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
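The int -> bool conversions make explicit that these are predicates rather than status codes. A short usage sketch:

#include <stdbool.h>

void bitmap_demo(const unsigned long *a, const unsigned long *b,
		 unsigned int nbits)
{
	bool same = bitmap_equal(a, b, nbits);		/* all nbits identical? */
	bool overlap = bitmap_intersects(a, b, nbits);	/* any common set bit? */

	if (same && !overlap) {
		/* only reachable when both bitmaps are all-zero */
	}
}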
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 57890b357f85..71e54b1e3796 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -73,7 +73,7 @@ asm( \
__BTF_ID_LIST(name, local) \
extern u32 name[];
-#define BTF_ID_LIST_GLOBAL(name) \
+#define BTF_ID_LIST_GLOBAL(name, n) \
__BTF_ID_LIST(name, globl)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
@@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl)
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
+ BTF_ID(prefix, typename)
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
@@ -143,13 +146,14 @@ extern struct btf_id_set name;
#else
-#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
-#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
-#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
-#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
-#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_END(name)
#endif /* CONFIG_DEBUG_INFO_BTF */
@@ -172,7 +176,10 @@ extern struct btf_id_set name;
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
- BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
enum {
#define BTF_SOCK_TYPE(name, str) name,
@@ -184,4 +191,18 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+
#endif
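Like BTF_SOCK_TYPE_xxx, the new BTF_TRACING_TYPE_xxx list is an X-macro. After preprocessing, the enum above is equivalent to the following, and btf_tracing_ids[] holds one resolved BTF type ID per entry:

enum {
	BTF_TRACING_TYPE_TASK,	/* task_struct */
	BTF_TRACING_TYPE_FILE,	/* file */
	BTF_TRACING_TYPE_VMA,	/* vm_area_struct */
	MAX_BTF_TRACING_TYPE,
};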
diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h
index 6491fa8fba6d..62c54ffbeeaa 100644
--- a/tools/include/linux/objtool.h
+++ b/tools/include/linux/objtool.h
@@ -32,11 +32,16 @@ struct unwind_hint {
*
* UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
* Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
*/
#define UNWIND_HINT_TYPE_CALL 0
#define UNWIND_HINT_TYPE_REGS 1
#define UNWIND_HINT_TYPE_REGS_PARTIAL 2
#define UNWIND_HINT_TYPE_FUNC 3
+#define UNWIND_HINT_TYPE_ENTRY 4
+#define UNWIND_HINT_TYPE_SAVE 5
+#define UNWIND_HINT_TYPE_RESTORE 6
#ifdef CONFIG_OBJTOOL
@@ -62,7 +67,7 @@ struct unwind_hint {
* It should only be used in special cases where you're 100% sure it won't
* affect the reliability of frame pointers and kernel stack traces.
*
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
+ * For more information, see tools/objtool/Documentation/objtool.txt.
*/
#define STACK_FRAME_NON_STANDARD(func) \
static void __used __section(".discard.func_stack_frame_non_standard") \
@@ -124,7 +129,7 @@ struct unwind_hint {
* the debuginfo as necessary. It will also warn if it sees any
* inconsistencies.
*/
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
.Lunwind_hint_ip_\@:
.pushsection .discard.unwind_hints
/* struct unwind_hint */
@@ -143,6 +148,12 @@ struct unwind_hint {
.popsection
.endm
+.macro STACK_FRAME_NON_STANDARD_FP func:req
+#ifdef CONFIG_FRAME_POINTER
+ STACK_FRAME_NON_STANDARD \func
+#endif
+.endm
+
.macro ANNOTATE_NOENDBR
.Lhere_\@:
.pushsection .discard.noendbr
@@ -171,7 +182,7 @@ struct unwind_hint {
#define ASM_REACHABLE
#else
#define ANNOTATE_INTRA_FUNCTION_CALL
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
.endm
.macro STACK_FRAME_NON_STANDARD func:req
.endm
diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h
index c8d9f19c1f35..967294b8edcf 100644
--- a/tools/include/linux/sched/mm.h
+++ b/tools/include/linux/sched/mm.h
@@ -1,4 +1,6 @@
#ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
#define _TOOLS_PERF_LINUX_SCHED_MM_H
+#define might_alloc(gfp) do { } while (0)
+
#endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */
diff --git a/tools/include/nolibc/Makefile b/tools/include/nolibc/Makefile
index 7a16d917c185..cfd06764b5ae 100644
--- a/tools/include/nolibc/Makefile
+++ b/tools/include/nolibc/Makefile
@@ -7,13 +7,46 @@ ifeq ($(srctree),)
srctree := $(patsubst %/tools/include/,%,$(dir $(CURDIR)))
endif
+# when run as make -C tools/ nolibc_<foo> the arch is not set
+ifeq ($(ARCH),)
+include $(srctree)/scripts/subarch.include
+ARCH = $(SUBARCH)
+endif
+
+# OUTPUT is only set when run from the main makefile, otherwise
+# it defaults to this nolibc directory.
+OUTPUT ?= $(CURDIR)/
+
+ifeq ($(V),1)
+Q=
+else
+Q=@
+endif
+
nolibc_arch := $(patsubst arm64,aarch64,$(ARCH))
arch_file := arch-$(nolibc_arch).h
all_files := ctype.h errno.h nolibc.h signal.h std.h stdio.h stdlib.h string.h \
sys.h time.h types.h unistd.h
# install all headers needed to support a bare-metal compiler
-all:
+all: headers
+
+install: help
+
+help:
+ @echo "Supported targets under nolibc:"
+ @echo " all call \"headers\""
+ @echo " clean clean the sysroot"
+ @echo " headers prepare a sysroot in tools/include/nolibc/sysroot"
+ @echo " headers_standalone like \"headers\", and also install kernel headers"
+ @echo " help this help"
+ @echo ""
+ @echo "These targets may also be called from tools as \"make nolibc_<target>\"."
+ @echo ""
+ @echo "Currently using the following variables:"
+ @echo " ARCH = $(ARCH)"
+ @echo " OUTPUT = $(OUTPUT)"
+ @echo ""
# Note: when ARCH is "x86" we concatenate both x86_64 and i386
headers:
@@ -36,7 +69,7 @@ headers:
headers_standalone: headers
$(Q)$(MAKE) -C $(srctree) headers
- $(Q)$(MAKE) -C $(srctree) headers_install INSTALL_HDR_PATH=$(OUTPUT)/sysroot
+ $(Q)$(MAKE) -C $(srctree) headers_install INSTALL_HDR_PATH=$(OUTPUT)sysroot
clean:
$(call QUIET_CLEAN, nolibc) rm -rf "$(OUTPUT)sysroot"
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index 15dedf8d0902..a3cebc4bc3ac 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -273,7 +273,7 @@ int vfprintf(FILE *stream, const char *fmt, va_list args)
return written;
}
-static __attribute__((unused))
+static __attribute__((unused, format(printf, 2, 3)))
int fprintf(FILE *stream, const char *fmt, ...)
{
va_list args;
@@ -285,7 +285,7 @@ int fprintf(FILE *stream, const char *fmt, ...)
return ret;
}
-static __attribute__((unused))
+static __attribute__((unused, format(printf, 1, 2)))
int printf(const char *fmt, ...)
{
va_list args;
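Tagging these with format(printf, ...) lets the compiler type-check variadic arguments at every call site. Illustrative calls that now draw -Wformat warnings:

void fmt_demo(void)
{
	printf("%d\n", 42L);	/* warns: '%d' expects int, argument is long */
	printf("%s\n", 42);	/* warns: '%s' expects char *, argument is int */
}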
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 8fd32eaf8037..92378c4b9660 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -128,10 +128,9 @@ void *malloc(size_t len)
static __attribute__((unused))
void *calloc(size_t size, size_t nmemb)
{
- void *orig;
- size_t res = 0;
+ size_t x = size * nmemb;
- if (__builtin_expect(__builtin_mul_overflow(nmemb, size, &res), 0)) {
+ if (__builtin_expect(size && ((x / size) != nmemb), 0)) {
SET_ERRNO(ENOMEM);
return NULL;
}
@@ -140,7 +139,7 @@ void *calloc(size_t size, size_t nmemb)
* No need to zero the heap, the MAP_ANONYMOUS in malloc()
* already does it.
*/
- return malloc(res);
+ return malloc(x);
}
static __attribute__((unused))
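The open-coded test replaces __builtin_mul_overflow() without changing behaviour: for a non-zero size, size * nmemb wrapped around exactly when dividing the product by size fails to give nmemb back. As a standalone sketch:

#include <stddef.h>
#include <stdint.h>

static int mul_overflows(size_t size, size_t nmemb)
{
	size_t x = size * nmemb;	/* may wrap modulo SIZE_MAX + 1 */

	return size && ((x / size) != nmemb);
}

/* mul_overflows(SIZE_MAX, 2) == 1; mul_overflows(0, n) == 0 for any n */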
diff --git a/tools/include/uapi/asm-generic/fcntl.h b/tools/include/uapi/asm-generic/fcntl.h
index ac190958c981..1ecdb911add8 100644
--- a/tools/include/uapi/asm-generic/fcntl.h
+++ b/tools/include/uapi/asm-generic/fcntl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_GENERIC_FCNTL_H
#define _ASM_GENERIC_FCNTL_H
@@ -90,7 +91,7 @@
/* a horrid kludge trying to make sure that this will fail on old kernels */
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
+#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
@@ -115,13 +116,13 @@
#define F_GETSIG 11 /* for sockets. */
#endif
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
-#endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
@@ -180,6 +181,10 @@ struct f_owner_ex {
blocking */
#define LOCK_UN 8 /* remove lock */
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
@@ -188,24 +193,19 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
- __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+ __ARCH_FLOCK_EXTRA_SYSID
#endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+ __ARCH_FLOCK_PAD
#endif
+};
struct flock64 {
short l_type;
@@ -213,8 +213,10 @@ struct flock64 {
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
+#ifdef __ARCH_FLOCK64_PAD
__ARCH_FLOCK64_PAD
-};
#endif
+};
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
#endif /* _ASM_GENERIC_FCNTL_H */
diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h
index 77f7c1638eb1..8756df13be50 100644
--- a/tools/include/uapi/asm-generic/socket.h
+++ b/tools/include/uapi/asm-generic/socket.h
@@ -119,6 +119,8 @@
#define SO_DETACH_REUSEPORT_BPF 68
+#define SO_RCVMARK 75
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 1c48b0ae3ba3..45fa180cc56a 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
/* kernel/ptrace.c */
#define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
/* kernel/sched/core.c */
#define __NR_sched_setparam 118
@@ -779,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
#define __NR_clock_settime64 404
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index edba4d93e9e6..da5206517158 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -17,6 +17,8 @@
#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
#elif defined(__alpha__)
#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#elif defined(__loongarch__)
+#include "../../../arch/loongarch/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
index 39acc149d843..d7dfeab0d71a 100644
--- a/tools/include/uapi/asm/bpf_perf_event.h
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -1,5 +1,7 @@
#if defined(__aarch64__)
#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__arc__)
+#include "../../arch/arc/include/uapi/asm/bpf_perf_event.h"
#elif defined(__s390__)
#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
#elif defined(__riscv)
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 05c3642aaece..b28ff5d88145 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -154,25 +154,77 @@ enum i915_mocs_table_index {
I915_MOCS_CACHED,
};
-/*
+/**
+ * enum drm_i915_gem_engine_class - uapi engine type enumeration
+ *
* Different engines serve different roles, and there may be more than one
- * engine serving each role. enum drm_i915_gem_engine_class provides a
- * classification of the role of the engine, which may be used when requesting
- * operations to be performed on a certain subset of engines, or for providing
- * information about that group.
+ * engine serving each role. This enum provides a classification of the role
+ * of the engine, which may be used when requesting operations to be performed
+ * on a certain subset of engines, or for providing information about that
+ * group.
*/
enum drm_i915_gem_engine_class {
+ /**
+ * @I915_ENGINE_CLASS_RENDER:
+ *
+ * Render engines support instructions used for 3D, Compute (GPGPU),
+ * and programmable media workloads. These instructions fetch data and
+ * dispatch individual work items to threads that operate in parallel.
+ * The threads run small programs (called "kernels" or "shaders") on
+ * the GPU's execution units (EUs).
+ */
I915_ENGINE_CLASS_RENDER = 0,
+
+ /**
+ * @I915_ENGINE_CLASS_COPY:
+ *
+ * Copy engines (also referred to as "blitters") support instructions
+ * that move blocks of data from one location in memory to another,
+ * or that fill a specified location of memory with fixed data.
+ * Copy engines can perform pre-defined logical or bitwise operations
+ * on the source, destination, or pattern data.
+ */
I915_ENGINE_CLASS_COPY = 1,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO:
+ *
+ * Video engines (also referred to as "bit stream decode" (BSD) or
+ * "vdbox") support instructions that perform fixed-function media
+ * decode and encode.
+ */
I915_ENGINE_CLASS_VIDEO = 2,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
+ *
+ * Video enhancement engines (also referred to as "vebox") support
+ * instructions related to image enhancement.
+ */
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
- /* should be kept compact */
+ /**
+ * @I915_ENGINE_CLASS_COMPUTE:
+ *
+ * Compute engines support a subset of the instructions available
+ * on render engines: compute engines support Compute (GPGPU) and
+ * programmable media workloads, but do not support the 3D pipeline.
+ */
+ I915_ENGINE_CLASS_COMPUTE = 4,
+ /* Values in this enum should be kept compact. */
+
+ /**
+ * @I915_ENGINE_CLASS_INVALID:
+ *
+ * Placeholder value to represent an invalid engine class assignment.
+ */
I915_ENGINE_CLASS_INVALID = -1
};
-/*
+/**
+ * struct i915_engine_class_instance - Engine class/instance identifier
+ *
* There may be more than one engine fulfilling any role within the system.
* Each engine of a class is given a unique instance number and therefore
* any engine can be specified by its class:instance tuplet. APIs that allow
@@ -180,10 +232,21 @@ enum drm_i915_gem_engine_class {
* for this identification.
*/
struct i915_engine_class_instance {
- __u16 engine_class; /* see enum drm_i915_gem_engine_class */
- __u16 engine_instance;
+ /**
+ * @engine_class:
+ *
+ * Engine class from enum drm_i915_gem_engine_class
+ */
+ __u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+
+ /**
+ * @engine_instance:
+ *
+ * Engine instance.
+ */
+ __u16 engine_instance;
};
/**
@@ -2060,7 +2123,7 @@ struct i915_context_engines_load_balance {
__u64 mbz64; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
@@ -2098,7 +2161,7 @@ struct i915_context_engines_bond {
__u64 flags; /* all undefined flags must be zero */
__u64 mbz64[4]; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
@@ -2225,7 +2288,7 @@ struct i915_context_engines_parallel_submit {
* length = width (i) * num_siblings (j)
* index = j + i * num_siblings
*/
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __packed;
@@ -2657,24 +2720,65 @@ enum drm_i915_perf_record_type {
DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
-/*
+/**
+ * struct drm_i915_perf_oa_config
+ *
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
- /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ /**
+ * @uuid:
+ *
+ * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
+ */
char uuid[36];
+ /**
+ * @n_mux_regs:
+ *
+ * Number of mux regs in &mux_regs_ptr.
+ */
__u32 n_mux_regs;
+
+ /**
+ * @n_boolean_regs:
+ *
+ * Number of boolean regs in &boolean_regs_ptr.
+ */
__u32 n_boolean_regs;
+
+ /**
+ * @n_flex_regs:
+ *
+ * Number of flex regs in &flex_regs_ptr.
+ */
__u32 n_flex_regs;
- /*
- * These fields are pointers to tuples of u32 values (register address,
- * value). For example the expected length of the buffer pointed by
- * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ /**
+ * @mux_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for mux
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_mux_regs).
*/
__u64 mux_regs_ptr;
+
+ /**
+ * @boolean_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for mux
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_boolean_regs).
+ */
__u64 boolean_regs_ptr;
+
+ /**
+ * @flex_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for
+ * flex registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_flex_regs).
+ */
__u64 flex_regs_ptr;
};
@@ -2685,12 +2789,24 @@ struct drm_i915_perf_oa_config {
* @data_ptr is also depends on the specific @query_id.
*/
struct drm_i915_query_item {
- /** @query_id: The id for this query */
+ /**
+ * @query_id:
+ *
+ * The id for this query. Currently accepted query IDs are:
+ * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
+ * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
+ * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
+ * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
+ * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+ */
__u64 query_id;
-#define DRM_I915_QUERY_TOPOLOGY_INFO 1
-#define DRM_I915_QUERY_ENGINE_INFO 2
-#define DRM_I915_QUERY_PERF_CONFIG 3
-#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_HWCONFIG_BLOB 5
+#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
/* Must be kept compact -- no holes and well documented */
/**
@@ -2706,14 +2822,17 @@ struct drm_i915_query_item {
/**
* @flags:
*
- * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
* following:
*
- * - DRM_I915_QUERY_PERF_CONFIG_LIST
- * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
- * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ *
+ * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES, must contain
+ * a struct i915_engine_class_instance that references a render engine.
*/
__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
@@ -2771,66 +2890,112 @@ struct drm_i915_query {
__u64 items_ptr;
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
- *
- * data: contains the 3 pieces of information :
- *
- * - the slice mask with one bit per slice telling whether a slice is
- * available. The availability of slice X can be queried with the following
- * formula :
- *
- * (data[X / 8] >> (X % 8)) & 1
- *
- * - the subslice mask for each slice with one bit per subslice telling
- * whether a subslice is available. Gen12 has dual-subslices, which are
- * similar to two gen11 subslices. For gen12, this array represents dual-
- * subslices. The availability of subslice Y in slice X can be queried
- * with the following formula :
- *
- * (data[subslice_offset +
- * X * subslice_stride +
- * Y / 8] >> (Y % 8)) & 1
- *
- * - the EU mask for each subslice in each slice with one bit per EU telling
- * whether an EU is available. The availability of EU Z in subslice Y in
- * slice X can be queried with the following formula :
+/**
+ * struct drm_i915_query_topology_info
*
- * (data[eu_offset +
- * (X * max_subslices + Y) * eu_stride +
- * Z / 8] >> (Z % 8)) & 1
+ * Describes slice/subslice/EU information queried by
+ * %DRM_I915_QUERY_TOPOLOGY_INFO
*/
struct drm_i915_query_topology_info {
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u16 flags;
+ /**
+ * @max_slices:
+ *
+ * The number of bits used to express the slice mask.
+ */
__u16 max_slices;
+
+ /**
+ * @max_subslices:
+ *
+ * The number of bits used to express the subslice mask.
+ */
__u16 max_subslices;
+
+ /**
+ * @max_eus_per_subslice:
+ *
+ * The number of bits in the EU mask that correspond to a single
+ * subslice's EUs.
+ */
__u16 max_eus_per_subslice;
- /*
+ /**
+ * @subslice_offset:
+ *
* Offset in data[] at which the subslice masks are stored.
*/
__u16 subslice_offset;
- /*
+ /**
+ * @subslice_stride:
+ *
* Stride at which each of the subslice masks for each slice are
* stored.
*/
__u16 subslice_stride;
- /*
+ /**
+ * @eu_offset:
+ *
* Offset in data[] at which the EU masks are stored.
*/
__u16 eu_offset;
- /*
+ /**
+ * @eu_stride:
+ *
* Stride at which each of the EU masks for each subslice are stored.
*/
__u16 eu_stride;
+ /**
+ * @data:
+ *
+ * Contains 3 pieces of information :
+ *
+ * - The slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the
+ * following formula :
+ *
+ * .. code:: c
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * Starting with Xe_HP platforms, Intel hardware no longer has
+ * traditional slices so i915 will always report a single slice
+ * (hardcoded slicemask = 0x1) which contains all of the platform's
+ * subslices. I.e., the mask here does not reflect any of the newer
+ * hardware concepts such as "gslices" or "cslices" since userspace
+ * is capable of inferring those from the subslice mask.
+ *
+ * - The subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. Starting with Gen12 we use the
+ * term "subslice" to refer to what the hardware documentation
+ * describes as a "dual-subslices." The availability of subslice Y
+ * in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
+ *
+ * - The EU mask for each subslice in each slice, with one bit per EU
+ * telling whether an EU is available. The availability of EU Z in
+ * subslice Y in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8
+ * ] >> (Z % 8)) & 1
+ */
__u8 data[];
};
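A helper sketch (not from the patch) that applies the three documented mask formulas to a filled-in query result:

#include <drm/i915_drm.h>

static int slice_available(const struct drm_i915_query_topology_info *ti,
			   int x)
{
	return (ti->data[x / 8] >> (x % 8)) & 1;
}

static int subslice_available(const struct drm_i915_query_topology_info *ti,
			      int x, int y)
{
	return (ti->data[ti->subslice_offset + x * ti->subslice_stride + y / 8]
		>> (y % 8)) & 1;
}

static int eu_available(const struct drm_i915_query_topology_info *ti,
			int x, int y, int z)
{
	return (ti->data[ti->eu_offset +
			 (x * ti->max_subslices + y) * ti->eu_stride + z / 8]
		>> (z % 8)) & 1;
}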
@@ -2951,52 +3116,68 @@ struct drm_i915_query_engine_info {
struct drm_i915_engine_info engines[];
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+/**
+ * struct drm_i915_query_perf_config
+ *
+ * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
+ * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
*/
struct drm_i915_query_perf_config {
union {
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
- * this fields to the number of configurations available.
+ /**
+ * @n_configs:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this fields to
+ * the number of configurations available.
*/
__u64 n_configs;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @config:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*/
__u64 config;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @uuid:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*
* String formatted like "%08x-%04x-%04x-%04x-%012x"
*/
char uuid[36];
};
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
- * write an array of __u64 of configuration identifiers.
+ /**
+ * @data:
*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
- * write a struct drm_i915_perf_oa_config. If the following fields of
- * drm_i915_perf_oa_config are set not set to 0, i915 will write into
- * the associated pointers the values of submitted when the
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
+ * i915 will write an array of __u64 of configuration identifiers.
+ *
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
+ * i915 will write a struct drm_i915_perf_oa_config. If the following
+ * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
+ * write into the associated pointers the values of submitted when the
* configuration was created :
*
- * - n_mux_regs
- * - n_boolean_regs
- * - n_flex_regs
+ * - &drm_i915_perf_oa_config.n_mux_regs
+ * - &drm_i915_perf_oa_config.n_boolean_regs
+ * - &drm_i915_perf_oa_config.n_flex_regs
*/
__u8 data[];
};
@@ -3135,6 +3316,16 @@ struct drm_i915_query_memory_regions {
};
/**
+ * DOC: GuC HWCONFIG blob uAPI
+ *
+ * The GuC produces a blob with information about the current device.
+ * i915 reads this blob from GuC and makes it available via this uAPI.
+ *
+ * The format and meaning of the blob content are documented in the
+ * Programmer's Reference Manual.
+ */
+
+/**
* struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
* extension support using struct i915_user_extension.
*
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index d14b10b85e51..59a217ca2dfd 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -998,6 +998,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
BPF_TRACE_KPROBE_MULTI,
+ BPF_LSM_CGROUP,
__MAX_BPF_ATTACH_TYPE
};
@@ -1013,6 +1014,7 @@ enum bpf_link_type {
BPF_LINK_TYPE_XDP = 6,
BPF_LINK_TYPE_PERF_EVENT = 7,
BPF_LINK_TYPE_KPROBE_MULTI = 8,
+ BPF_LINK_TYPE_STRUCT_OPS = 9,
MAX_BPF_LINK_TYPE,
};
@@ -1430,6 +1432,7 @@ union bpf_attr {
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
+ __aligned_u64 prog_attach_flags; /* output: per-program attach_flags */
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -1489,6 +1492,15 @@ union bpf_attr {
__aligned_u64 addrs;
__aligned_u64 cookies;
} kprobe_multi;
+ struct {
+ /* this is overlaid with the target_btf_id above. */
+ __u32 target_btf_id;
+ /* black box user-provided value passed through
+ * to BPF program at the execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 cookie;
+ } tracing;
};
} link_create;
@@ -2349,7 +2361,8 @@ union bpf_attr {
* Pull in non-linear data in case the *skb* is non-linear and not
* all of *len* are part of the linear section. Make *len* bytes
* from *skb* readable and writable. If a zero value is passed for
- * *len*, then the whole length of the *skb* is pulled.
+ * *len*, then all bytes in the linear part of *skb* will be made
+ * readable and writable.
*
* This helper is only needed for reading and writing with direct
* packet access.
@@ -3587,10 +3600,11 @@ union bpf_attr {
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains **sizeof**\ (**struct tcphdr**).
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* error otherwise.
@@ -3773,10 +3787,11 @@ union bpf_attr {
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains the length of the TCP header.
+ * contains the length of the TCP header with options (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
* On success, lower 32 bits hold the generated SYN cookie,
* followed by 16 bits which hold the MSS value for that cookie,
@@ -5143,6 +5158,179 @@ union bpf_attr {
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ * The old value of kptr (which can be NULL). The returned pointer
+ * if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
+ *
+ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
+ * Description
+ * Perform a lookup in *percpu map* for an entry associated to
+ * *key* on *cpu*.
+ * Return
+ * Map value associated to *key* on *cpu*, or **NULL** if no entry
+ * was found or *cpu* is invalid.
+ *
+ * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Get a dynptr to local memory *data*.
+ *
+ * *data* must be a ptr to a map value.
+ * The maximum *size* supported is DYNPTR_MAX_SIZE.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
+ * -EINVAL if flags is not 0.
+ *
+ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ * through the dynptr interface. *flags* must be 0.
+ *
+ * Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ * reservation fails. This is enforced by the verifier.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Submit reserved ring buffer sample, pointed to by *data*,
+ * through the dynptr interface. This is a no-op if the dynptr is
+ * invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_submit'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Discard reserved ring buffer sample through the dynptr
+ * interface. This is a no-op if the dynptr is invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_discard'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
+ * Description
+ * Read *len* bytes from *src* into *dst*, starting from *offset*
+ * into *src*.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+ * *flags* is not 0.
+ *
+ * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
+ * Description
+ * Write *len* bytes from *src* into *dst*, starting from *offset*
+ * into *dst*.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
+ * is a read-only dynptr or if *flags* is not 0.
+ *
+ * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
+ * Description
+ * Get a pointer to the underlying dynptr data.
+ *
+ * *len* must be a statically known value. The returned data slice
+ * is invalidated whenever the dynptr is invalidated.
+ * Return
+ * Pointer to the underlying dynptr data, NULL if the dynptr is
+ * read-only, if the dynptr is invalid, or if the offset and length
+ * are out of bounds.
+ *
+ * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv4/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie,
+ * and the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ *
+ * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv6/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie,
+ * and the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5339,6 +5527,20 @@ union bpf_attr {
FN(copy_from_user_task), \
FN(skb_set_tstamp), \
FN(ima_file_hash), \
+ FN(kptr_xchg), \
+ FN(map_lookup_percpu_elem), \
+ FN(skc_to_mptcp_sock), \
+ FN(dynptr_from_mem), \
+ FN(ringbuf_reserve_dynptr), \
+ FN(ringbuf_submit_dynptr), \
+ FN(ringbuf_discard_dynptr), \
+ FN(dynptr_read), \
+ FN(dynptr_write), \
+ FN(dynptr_data), \
+ FN(tcp_raw_gen_syncookie_ipv4), \
+ FN(tcp_raw_gen_syncookie_ipv6), \
+ FN(tcp_raw_check_syncookie_ipv4), \
+ FN(tcp_raw_check_syncookie_ipv6), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -5592,6 +5794,10 @@ struct bpf_tunnel_key {
__u8 tunnel_ttl;
__u16 tunnel_ext; /* Padding, future use. */
__u32 tunnel_label;
+ union {
+ __u32 local_ipv4;
+ __u32 local_ipv6[4];
+ };
};
/* user accessible mirror of in-kernel xfrm_state.
@@ -5875,6 +6081,8 @@ struct bpf_prog_info {
__u64 run_cnt;
__u64 recursion_misses;
__u32 verified_insns;
+ __u32 attach_btf_obj_id;
+ __u32 attach_btf_id;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -6486,6 +6694,11 @@ struct bpf_timer {
__u64 :64;
} __attribute__((aligned(8)));
+struct bpf_dynptr {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
@@ -6577,6 +6790,7 @@ enum bpf_core_relo_kind {
BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
+ BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
};
/*
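A compact BPF-side sketch of the new dynptr ring buffer flow (map and section names are illustrative). Note the documented requirement to submit or discard the dynptr even when the reservation fails:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("tp/sched/sched_switch")
int dynptr_demo(void *ctx)
{
	struct bpf_dynptr ptr;
	__u64 val = 42;

	if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(val), 0, &ptr)) {
		/* reservation failed: discard is still mandatory */
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}
	bpf_dynptr_write(&ptr, 0, &val, sizeof(val), 0);
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";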
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index b0d8fea1951d..ec1798b6d3ff 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -33,13 +33,13 @@ struct btf_type {
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
- * bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-30: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
* bit 31: kind_flag, currently used by
- * struct, union and fwd
+ * struct, union, enum, fwd and enum64
*/
__u32 info;
- /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
+ /* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
@@ -63,7 +63,7 @@ enum {
BTF_KIND_ARRAY = 3, /* Array */
BTF_KIND_STRUCT = 4, /* Struct */
BTF_KIND_UNION = 5, /* Union */
- BTF_KIND_ENUM = 6, /* Enumeration */
+ BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */
BTF_KIND_FWD = 7, /* Forward */
BTF_KIND_TYPEDEF = 8, /* Typedef */
BTF_KIND_VOLATILE = 9, /* Volatile */
@@ -76,6 +76,7 @@ enum {
BTF_KIND_FLOAT = 16, /* Floating point */
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
BTF_KIND_TYPE_TAG = 18, /* Type Tag */
+ BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */
NR_BTF_KINDS,
BTF_KIND_MAX = NR_BTF_KINDS - 1,
@@ -186,4 +187,14 @@ struct btf_decl_tag {
__s32 component_idx;
};
+/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64".
+ * The exact number of btf_enum64 is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_enum64 {
+ __u32 name_off;
+ __u32 val_lo32;
+ __u32 val_hi32;
+};
+
#endif /* _UAPI__LINUX_BTF_H__ */
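Since the 64-bit enumerator value is split across two 32-bit fields, reassembly looks like this (sketch; libbpf ships an equivalent helper):

#include <linux/btf.h>

static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
{
	return ((__u64)e->val_hi32 << 32) | e->val_lo32;
}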
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index bdf7b404b3e7..b7b56871029c 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -90,7 +90,7 @@ struct file_dedupe_range {
__u16 dest_count; /* in - total elements in info array */
__u16 reserved1; /* must be zero */
__u32 reserved2; /* must be zero */
- struct file_dedupe_range_info info[0];
+ struct file_dedupe_range_info info[];
};
/* And dynamically-tunable limits and defaults: */
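This is one of several [0] -> [] conversions in this update: a C99 flexible array member is laid out and allocated identically, but gives the compiler and fortified memory routines a visible bound. Illustrative allocation sketch:

#include <stdlib.h>
#include <linux/fs.h>

struct file_dedupe_range *alloc_dedupe(unsigned short n)
{
	struct file_dedupe_range *r;

	/* same sizing arithmetic as with the old zero-length array */
	r = calloc(1, sizeof(*r) + n * sizeof(r->info[0]));
	if (r)
		r->dest_count = n;
	return r;
}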
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index e1ba2d51b717..0242f31e339c 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -348,6 +348,8 @@ enum {
IFLA_PARENT_DEV_NAME,
IFLA_PARENT_DEV_BUS_NAME,
IFLA_GRO_MAX_SIZE,
+ IFLA_TSO_MAX_SIZE,
+ IFLA_TSO_MAX_SEGS,
__IFLA_MAX
};
@@ -888,6 +890,7 @@ enum {
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
+ IFLA_BOND_SLAVE_PRIO,
__IFLA_BOND_SLAVE_MAX,
};
diff --git a/tools/include/uapi/linux/if_tun.h b/tools/include/uapi/linux/if_tun.h
index 454ae31b93c7..2ec07de1d73b 100644
--- a/tools/include/uapi/linux/if_tun.h
+++ b/tools/include/uapi/linux/if_tun.h
@@ -108,7 +108,7 @@ struct tun_pi {
struct tun_filter {
__u16 flags; /* TUN_FLT_ flags see above */
__u16 count; /* Number of addresses */
- __u8 addr[0][ETH_ALEN];
+ __u8 addr[][ETH_ALEN];
};
#endif /* _UAPI__IF_TUN_H */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 6a184d260c7f..cb6e3846d27b 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -444,6 +444,9 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
#define KVM_SYSTEM_EVENT_CRASH 3
+#define KVM_SYSTEM_EVENT_WAKEUP 4
+#define KVM_SYSTEM_EVENT_SUSPEND 5
+#define KVM_SYSTEM_EVENT_SEV_TERM 6
__u32 type;
__u32 ndata;
union {
@@ -539,7 +542,7 @@ struct kvm_coalesced_mmio {
struct kvm_coalesced_mmio_ring {
__u32 first, last;
- struct kvm_coalesced_mmio coalesced_mmio[0];
+ struct kvm_coalesced_mmio coalesced_mmio[];
};
#define KVM_COALESCED_MMIO_MAX \
@@ -618,7 +621,7 @@ struct kvm_clear_dirty_log {
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
- __u8 sigset[0];
+ __u8 sigset[];
};
/* for KVM_TPR_ACCESS_REPORTING */
@@ -646,6 +649,7 @@ struct kvm_vapic_addr {
#define KVM_MP_STATE_OPERATING 7
#define KVM_MP_STATE_LOAD 8
#define KVM_MP_STATE_AP_RESET_HOLD 9
+#define KVM_MP_STATE_SUSPENDED 10
struct kvm_mp_state {
__u32 mp_state;
@@ -1150,8 +1154,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_MEM_OP_EXTENSION 211
#define KVM_CAP_PMU_CAPABILITY 212
#define KVM_CAP_DISABLE_QUIRKS2 213
-/* #define KVM_CAP_VM_TSC_CONTROL 214 */
+#define KVM_CAP_VM_TSC_CONTROL 214
#define KVM_CAP_SYSTEM_EVENT_DATA 215
+#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1216,7 +1221,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing {
__u32 nr;
__u32 flags;
- struct kvm_irq_routing_entry entries[0];
+ struct kvm_irq_routing_entry entries[];
};
#endif
@@ -1240,6 +1245,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
struct kvm_xen_hvm_config {
__u32 flags;
@@ -1335,7 +1341,7 @@ struct kvm_dirty_tlb {
struct kvm_reg_list {
__u64 n; /* number of regs */
- __u64 reg[0];
+ __u64 reg[];
};
struct kvm_one_reg {
@@ -1478,7 +1484,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
/* Available with KVM_CAP_PPC_GET_PVINFO */
#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
-/* Available with KVM_CAP_TSC_CONTROL */
+/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
+* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
/* Available with KVM_CAP_PCI_2_3 */
@@ -1694,6 +1701,32 @@ struct kvm_xen_hvm_attr {
struct {
__u64 gfn;
} shared_info;
+ struct {
+ __u32 send_port;
+ __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
+ __u32 flags;
+#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
+#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
+#define KVM_XEN_EVTCHN_RESET (1 << 2)
+ /*
+ * Events sent by the guest are either looped back to
+ * the guest itself (potentially on a different port#)
+ * or signalled via an eventfd.
+ */
+ union {
+ struct {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+ } port;
+ struct {
+ __u32 port; /* Zero for eventfd */
+ __s32 fd;
+ } eventfd;
+ __u32 padding[4];
+ } deliver;
+ } evtchn;
+ __u32 xen_version;
__u64 pad[8];
} u;
};
@@ -1702,11 +1735,17 @@ struct kvm_xen_hvm_attr {
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
+#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
/* Per-vCPU Xen attributes */
#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn)
+
#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
@@ -1724,6 +1763,13 @@ struct kvm_xen_vcpu_attr {
__u64 time_blocked;
__u64 time_offline;
} runstate;
+ __u32 vcpu_id;
+ struct {
+ __u32 port;
+ __u32 priority;
+ __u64 expires_ns;
+ } timer;
+ __u8 vector;
} u;
};
@@ -1734,6 +1780,10 @@ struct kvm_xen_vcpu_attr {
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
+#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
@@ -2033,7 +2083,8 @@ struct kvm_stats_header {
#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
#define KVM_STATS_BASE_SHIFT 8
#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index d37629dbad72..4653834f078f 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -491,7 +491,7 @@ struct perf_event_query_bpf {
/*
* User provided buffer to store program ids
*/
- __u32 ids[0];
+ __u32 ids[];
};
/*
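
Several hunks in this series replace the old zero-length-array idiom (member[0]) with a C99 flexible array member (member[]). The memory layout is unchanged, but the flexible form lets the compiler and fortified memory helpers flag out-of-bounds accesses. Allocation code stays the same; for the query structure above it might look like this (n is an arbitrary count):

    __u32 n = 16;
    struct perf_event_query_bpf *query;

    query = calloc(1, sizeof(*query) + n * sizeof(query->ids[0]));
    if (query)
        query->ids_len = n; /* the kernel fills ids[] on PERF_EVENT_IOC_QUERY_BPF */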
diff --git a/tools/include/uapi/linux/pkt_cls.h b/tools/include/uapi/linux/pkt_cls.h
index 12153771396a..3faee0199a9b 100644
--- a/tools/include/uapi/linux/pkt_cls.h
+++ b/tools/include/uapi/linux/pkt_cls.h
@@ -180,7 +180,7 @@ struct tc_u32_sel {
short hoff;
__be32 hmask;
- struct tc_u32_key keys[0];
+ struct tc_u32_key keys[];
};
struct tc_u32_mark {
@@ -192,7 +192,7 @@ struct tc_u32_mark {
struct tc_u32_pcnt {
__u64 rcnt;
__u64 rhit;
- __u64 kcnts[0];
+ __u64 kcnts[];
};
/* Flags */
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index e998764f0262..a5e06dcbba13 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -272,6 +272,15 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL 63 /* set task vector length */
+# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL 64 /* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */
+
#define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
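
The SME controls deliberately mirror the existing SVE vector-length prctls, so userspace drives them the same way. A sketch, with an illustrative 256-byte streaming vector length:

    #include <sys/prctl.h>

    /* Request a 256-byte SME vector length and keep it across exec. */
    if (prctl(PR_SME_SET_VL, 256 | PR_SME_VL_INHERIT) < 0)
        perror("PR_SME_SET_VL");

    /* The return value packs flags and length; mask to get the length. */
    int vl = prctl(PR_SME_GET_VL) & PR_SME_VL_LEN_MASK;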
diff --git a/tools/include/uapi/linux/seg6.h b/tools/include/uapi/linux/seg6.h
index 286e8d6a8e98..f94baf154c47 100644
--- a/tools/include/uapi/linux/seg6.h
+++ b/tools/include/uapi/linux/seg6.h
@@ -30,7 +30,7 @@ struct ipv6_sr_hdr {
__u8 flags;
__u16 tag;
- struct in6_addr segments[0];
+ struct in6_addr segments[];
};
#define SR6_FLAG1_PROTECTED (1 << 6)
@@ -49,7 +49,7 @@ struct ipv6_sr_hdr {
struct sr6_tlv {
__u8 type;
__u8 len;
- __u8 data[0];
+ __u8 data[];
};
#endif
diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h
index cf525cddeb94..74a84e02422a 100644
--- a/tools/include/uapi/linux/usbdevice_fs.h
+++ b/tools/include/uapi/linux/usbdevice_fs.h
@@ -131,7 +131,7 @@ struct usbdevfs_urb {
unsigned int signr; /* signal to be sent on completion,
or 0 if none should be sent. */
void __user *usercontext;
- struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+ struct usbdevfs_iso_packet_desc iso_frame_desc[];
};
/* ioctls for talking directly to drivers */
@@ -176,7 +176,7 @@ struct usbdevfs_disconnect_claim {
struct usbdevfs_streams {
unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */
unsigned int num_eps;
- unsigned char eps[0];
+ unsigned char eps[];
};
/*
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index 5d99e7c242a2..cab645d4a645 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -89,11 +89,6 @@
/* Set or get vhost backend capability */
-/* Use message type V2 */
-#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
-/* IOTLB can accept batching hints */
-#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
-
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
@@ -150,11 +145,30 @@
/* Get the valid iova range */
#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
struct vhost_vdpa_iova_range)
-
/* Get the config size */
#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
/* Get the count of all virtqueues */
#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
+
+/* Get the number of address spaces. */
+#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
+
+/* Get the group for a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of
+ * vhost_vring_state. The group for this specific virtqueue is
+ * returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
+ struct vhost_vring_state)
+/* Set the ASID for a virtqueue group. The group index is stored in
+ * the index field of vhost_vring_state, the ASID associated with this
+ * group is stored at num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
+ struct vhost_vring_state)
+
#endif
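
Taken together, the new ioctls let a VMM discover how a vDPA device groups its virtqueues and bind each group to an address space. A rough sketch against an open /dev/vhost-vdpa-N fd; the queue index and ASID are example values:

    struct vhost_vring_state s;

    /* Which group does virtqueue 0 belong to? */
    s.index = 0;
    ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_GROUP, &s); /* group returned in s.num */

    /* Bind that whole group to address space 1. */
    s.index = s.num; /* group index */
    s.num = 1;       /* ASID */
    ioctl(vdpa_fd, VHOST_VDPA_SET_GROUP_ASID, &s);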
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 2d3e5df39a59..3974a2a911cc 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -1106,7 +1106,7 @@ struct snd_ctl_elem_value {
struct snd_ctl_tlv {
unsigned int numid; /* control element numeric identification */
unsigned int length; /* in bytes aligned to 4 */
- unsigned int tlv[0]; /* first TLV */
+ unsigned int tlv[]; /* first TLV */
};
#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 5a5bd74f55bd..9c366b3a676d 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately.
.format(values))
if len(pids) > 1:
sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
- ' to specify the desired pid'.format(" ".join(pids)))
+ ' to specify the desired pid'
+ .format(" ".join(map(str, pids))))
namespace.pid = pids[0]
argparser = argparse.ArgumentParser(description=description_text,
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index db466ef7be9d..354f8cdc0880 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -72,31 +72,31 @@ int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
return result != 0;
}
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
- return 0;
+ return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 0;
+ return false;
- return 1;
+ return true;
}
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] & bitmap2[k])
- return 1;
+ return true;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 1;
- return 0;
+ return true;
+ return false;
}
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 94f0a146bb7b..5a3dfb56d78f 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,3 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
- netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
- btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o
+ netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
+ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
+ usdt.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 064c89e31560..4c904ef0b47e 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -127,7 +127,7 @@ TAGS_PROG := $(if $(shell which etags 2>/dev/null),etags,ctags)
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
sed 's/\[.*\]//' | \
- awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \
sort -u | wc -l)
VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
sed 's/\[.*\]//' | \
@@ -237,9 +237,9 @@ install_lib: all_cmd
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
-SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \
+SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h \
bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \
- skel_internal.h libbpf_version.h
+ skel_internal.h libbpf_version.h usdt.bpf.h
GEN_HDRS := $(BPF_GENERATED)
INSTALL_PFX := $(DESTDIR)$(prefix)/include/bpf
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index cf27251adb92..efcc06dafbd9 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -147,10 +147,6 @@ int bump_rlimit_memlock(void)
{
struct rlimit rlim;
- /* this the default in libbpf 1.0, but for now user has to opt-in explicitly */
- if (!(libbpf_mode & LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK))
- return 0;
-
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
return 0;
@@ -208,86 +204,6 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err_errno(fd);
}
-int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
-{
- LIBBPF_OPTS(bpf_map_create_opts, p);
-
- p.map_flags = create_attr->map_flags;
- p.numa_node = create_attr->numa_node;
- p.btf_fd = create_attr->btf_fd;
- p.btf_key_type_id = create_attr->btf_key_type_id;
- p.btf_value_type_id = create_attr->btf_value_type_id;
- p.map_ifindex = create_attr->map_ifindex;
- if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS)
- p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id;
- else
- p.inner_map_fd = create_attr->inner_map_fd;
-
- return bpf_map_create(create_attr->map_type, create_attr->name,
- create_attr->key_size, create_attr->value_size,
- create_attr->max_entries, &p);
-}
-
-int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags, int node)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts);
-
- opts.map_flags = map_flags;
- if (node >= 0) {
- opts.numa_node = node;
- opts.map_flags |= BPF_F_NUMA_NODE;
- }
-
- return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
-}
-
-int bpf_create_map(enum bpf_map_type map_type, int key_size,
- int value_size, int max_entries, __u32 map_flags)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
-
- return bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
-}
-
-int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
-
- return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
-}
-
-int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags, int node)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts);
-
- opts.inner_map_fd = inner_map_fd;
- opts.map_flags = map_flags;
- if (node >= 0) {
- opts.map_flags |= BPF_F_NUMA_NODE;
- opts.numa_node = node;
- }
-
- return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
-}
-
-int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts,
- .inner_map_fd = inner_map_fd,
- .map_flags = map_flags,
- );
-
- return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
-}
-
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
__u32 actual_rec_size, __u32 expected_rec_size)
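
All the removed bpf_create_map*() variants collapse into the single bpf_map_create() entry point plus an extensible opts struct. What bpf_create_map_node() used to do now reads roughly as follows (map geometry is illustrative):

    LIBBPF_OPTS(bpf_map_create_opts, opts,
        .map_flags = BPF_F_NUMA_NODE,
        .numa_node = 0,
    );
    int map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
                                sizeof(__u32), sizeof(__u64), 1024, &opts);
    if (map_fd < 0)
        fprintf(stderr, "bpf_map_create: %d\n", map_fd); /* -errno on failure */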
@@ -313,11 +229,10 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
return info;
}
-DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
-int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
- const char *prog_name, const char *license,
- const struct bpf_insn *insns, size_t insn_cnt,
- const struct bpf_prog_load_opts *opts)
+int bpf_prog_load(enum bpf_prog_type prog_type,
+ const char *prog_name, const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ const struct bpf_prog_load_opts *opts)
{
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
@@ -464,94 +379,6 @@ done:
return libbpf_err_errno(fd);
}
-__attribute__((alias("bpf_load_program_xattr2")))
-int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz);
-
-static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, p);
-
- if (!load_attr || !log_buf != !log_buf_sz)
- return libbpf_err(-EINVAL);
-
- p.expected_attach_type = load_attr->expected_attach_type;
- switch (load_attr->prog_type) {
- case BPF_PROG_TYPE_STRUCT_OPS:
- case BPF_PROG_TYPE_LSM:
- p.attach_btf_id = load_attr->attach_btf_id;
- break;
- case BPF_PROG_TYPE_TRACING:
- case BPF_PROG_TYPE_EXT:
- p.attach_btf_id = load_attr->attach_btf_id;
- p.attach_prog_fd = load_attr->attach_prog_fd;
- break;
- default:
- p.prog_ifindex = load_attr->prog_ifindex;
- p.kern_version = load_attr->kern_version;
- }
- p.log_level = load_attr->log_level;
- p.log_buf = log_buf;
- p.log_size = log_buf_sz;
- p.prog_btf_fd = load_attr->prog_btf_fd;
- p.func_info_rec_size = load_attr->func_info_rec_size;
- p.func_info_cnt = load_attr->func_info_cnt;
- p.func_info = load_attr->func_info;
- p.line_info_rec_size = load_attr->line_info_rec_size;
- p.line_info_cnt = load_attr->line_info_cnt;
- p.line_info = load_attr->line_info;
- p.prog_flags = load_attr->prog_flags;
-
- return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
- load_attr->insns, load_attr->insns_cnt, &p);
-}
-
-int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, const char *license,
- __u32 kern_version, char *log_buf,
- size_t log_buf_sz)
-{
- struct bpf_load_program_attr load_attr;
-
- memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
- load_attr.prog_type = type;
- load_attr.expected_attach_type = 0;
- load_attr.name = NULL;
- load_attr.insns = insns;
- load_attr.insns_cnt = insns_cnt;
- load_attr.license = license;
- load_attr.kern_version = kern_version;
-
- return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
-}
-
-int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, __u32 prog_flags, const char *license,
- __u32 kern_version, char *log_buf, size_t log_buf_sz,
- int log_level)
-{
- union bpf_attr attr;
- int fd;
-
- bump_rlimit_memlock();
-
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = type;
- attr.insn_cnt = (__u32)insns_cnt;
- attr.insns = ptr_to_u64(insns);
- attr.license = ptr_to_u64(license);
- attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_buf_sz;
- attr.log_level = log_level;
- log_buf[0] = 0;
- attr.kern_version = kern_version;
- attr.prog_flags = prog_flags;
-
- fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
- return libbpf_err_errno(fd);
-}
-
int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags)
{
@@ -639,6 +466,20 @@ int bpf_map_delete_elem(int fd, const void *key)
return libbpf_err_errno(ret);
}
+int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
+{
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.flags = flags;
+
+ ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
+}
+
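
The flags variant exists so callers can pass flags such as BPF_F_LOCK when deleting from maps whose values embed a bpf_spin_lock. A sketch, assuming the kernel and map type accept BPF_F_LOCK here:

    __u32 key = 42; /* illustrative key */
    int err = bpf_map_delete_elem_flags(map_fd, &key, BPF_F_LOCK);
    if (err)
        fprintf(stderr, "delete failed: %d\n", err);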
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
@@ -738,11 +579,20 @@ int bpf_obj_pin(int fd, const char *pathname)
int bpf_obj_get(const char *pathname)
{
+ return bpf_obj_get_opts(pathname, NULL);
+}
+
+int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
+{
union bpf_attr attr;
int fd;
+ if (!OPTS_VALID(opts, bpf_obj_get_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, sizeof(attr));
attr.pathname = ptr_to_u64((void *)pathname);
+ attr.file_flags = OPTS_GET(opts, file_flags, 0);
fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
return libbpf_err_errno(fd);
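
The opts form is mainly useful for opening pinned objects with explicit file flags, e.g. read-only (the pin path is illustrative):

    LIBBPF_OPTS(bpf_obj_get_opts, opts, .file_flags = BPF_F_RDONLY);

    int fd = bpf_obj_get_opts("/sys/fs/bpf/example_map", &opts);
    if (fd < 0)
        fprintf(stderr, "bpf_obj_get_opts: %d\n", fd);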
@@ -817,7 +667,7 @@ int bpf_link_create(int prog_fd, int target_fd,
{
__u32 target_btf_id, iter_info_len;
union bpf_attr attr;
- int fd;
+ int fd, err;
if (!OPTS_VALID(opts, bpf_link_create_opts))
return libbpf_err(-EINVAL);
@@ -863,6 +713,14 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, kprobe_multi))
return libbpf_err(-EINVAL);
break;
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ case BPF_LSM_MAC:
+ attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
+ if (!OPTS_ZEROED(opts, tracing))
+ return libbpf_err(-EINVAL);
+ break;
default:
if (!OPTS_ZEROED(opts, flags))
return libbpf_err(-EINVAL);
@@ -870,7 +728,37 @@ int bpf_link_create(int prog_fd, int target_fd,
}
proceed:
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
- return libbpf_err_errno(fd);
+ if (fd >= 0)
+ return fd;
+ /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
+ * and other similar programs
+ */
+ err = -errno;
+ if (err != -EINVAL)
+ return libbpf_err(err);
+
+ /* if user used features not supported by
+ * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
+ */
+ if (attr.link_create.target_fd || attr.link_create.target_btf_id)
+ return libbpf_err(err);
+ if (!OPTS_ZEROED(opts, sz))
+ return libbpf_err(err);
+
+ /* otherwise, for a few select kinds of programs that can be
+ * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
+ * a fallback for older kernels
+ */
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ case BPF_LSM_MAC:
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ return bpf_raw_tracepoint_open(NULL, prog_fd);
+ default:
+ return libbpf_err(err);
+ }
}
int bpf_link_detach(int link_fd)
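
With the new tracing.cookie option, fentry/fexit/fmod_ret/LSM programs can be attached via LINK_CREATE with a cookie the program later reads back with bpf_get_attach_cookie(); on kernels that predate LINK_CREATE support for these attach types, and only when no options were supplied, the code above quietly falls back to BPF_RAW_TRACEPOINT_OPEN. A sketch (prog_fd and the cookie value are illustrative):

    LIBBPF_OPTS(bpf_link_create_opts, opts, .tracing.cookie = 0x1234);

    int link_fd = bpf_link_create(prog_fd, 0 /* target_fd */,
                                  BPF_TRACE_FENTRY, &opts);
    if (link_fd < 0)
        fprintf(stderr, "bpf_link_create: %d\n", link_fd);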
@@ -916,80 +804,48 @@ int bpf_iter_create(int link_fd)
return libbpf_err_errno(fd);
}
-int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
- __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
+int bpf_prog_query_opts(int target_fd,
+ enum bpf_attach_type type,
+ struct bpf_prog_query_opts *opts)
{
union bpf_attr attr;
int ret;
+ if (!OPTS_VALID(opts, bpf_prog_query_opts))
+ return libbpf_err(-EINVAL);
+
memset(&attr, 0, sizeof(attr));
+
attr.query.target_fd = target_fd;
attr.query.attach_type = type;
- attr.query.query_flags = query_flags;
- attr.query.prog_cnt = *prog_cnt;
- attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
+ attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
+ attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
+ attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
- if (attach_flags)
- *attach_flags = attr.query.attach_flags;
- *prog_cnt = attr.query.prog_cnt;
+ OPTS_SET(opts, attach_flags, attr.query.attach_flags);
+ OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);
return libbpf_err_errno(ret);
}
-int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
- void *data_out, __u32 *size_out, __u32 *retval,
- __u32 *duration)
+int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
+ __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
- union bpf_attr attr;
+ LIBBPF_OPTS(bpf_prog_query_opts, opts);
int ret;
- memset(&attr, 0, sizeof(attr));
- attr.test.prog_fd = prog_fd;
- attr.test.data_in = ptr_to_u64(data);
- attr.test.data_out = ptr_to_u64(data_out);
- attr.test.data_size_in = size;
- attr.test.repeat = repeat;
-
- ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
-
- if (size_out)
- *size_out = attr.test.data_size_out;
- if (retval)
- *retval = attr.test.retval;
- if (duration)
- *duration = attr.test.duration;
+ opts.query_flags = query_flags;
+ opts.prog_ids = prog_ids;
+ opts.prog_cnt = *prog_cnt;
- return libbpf_err_errno(ret);
-}
-
-int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
-{
- union bpf_attr attr;
- int ret;
-
- if (!test_attr->data_out && test_attr->data_size_out > 0)
- return libbpf_err(-EINVAL);
+ ret = bpf_prog_query_opts(target_fd, type, &opts);
- memset(&attr, 0, sizeof(attr));
- attr.test.prog_fd = test_attr->prog_fd;
- attr.test.data_in = ptr_to_u64(test_attr->data_in);
- attr.test.data_out = ptr_to_u64(test_attr->data_out);
- attr.test.data_size_in = test_attr->data_size_in;
- attr.test.data_size_out = test_attr->data_size_out;
- attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
- attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
- attr.test.ctx_size_in = test_attr->ctx_size_in;
- attr.test.ctx_size_out = test_attr->ctx_size_out;
- attr.test.repeat = test_attr->repeat;
-
- ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
-
- test_attr->data_size_out = attr.test.data_size_out;
- test_attr->ctx_size_out = attr.test.ctx_size_out;
- test_attr->retval = attr.test.retval;
- test_attr->duration = attr.test.duration;
+ if (attach_flags)
+ *attach_flags = opts.attach_flags;
+ *prog_cnt = opts.prog_cnt;
return libbpf_err_errno(ret);
}
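
The opts variant adds the per-program attach-flags output and demotes the legacy bpf_prog_query() to a thin wrapper, as seen below. Querying a cgroup might look like this (array size is arbitrary):

    __u32 ids[64];
    LIBBPF_OPTS(bpf_prog_query_opts, opts,
        .prog_ids = ids,
        .prog_cnt = 64,
    );

    int err = bpf_prog_query_opts(cgroup_fd, BPF_CGROUP_INET_INGRESS, &opts);
    if (!err)
        printf("%u programs attached\n", opts.prog_cnt);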
@@ -1190,27 +1046,6 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
return libbpf_err_errno(fd);
}
-int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size, bool do_log)
-{
- LIBBPF_OPTS(bpf_btf_load_opts, opts);
- int fd;
-
-retry:
- if (do_log && log_buf && log_buf_size) {
- opts.log_buf = log_buf;
- opts.log_size = log_buf_size;
- opts.log_level = 1;
- }
-
- fd = bpf_btf_load(btf, btf_size, &opts);
- if (fd < 0 && !do_log && log_buf && log_buf_size) {
- do_log = true;
- goto retry;
- }
-
- return libbpf_err_errno(fd);
-}
-
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
__u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
__u64 *probe_addr)
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index f4b4afb6d4ba..9c50beabdd14 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -61,48 +61,6 @@ LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
__u32 max_entries,
const struct bpf_map_create_opts *opts);
-struct bpf_create_map_attr {
- const char *name;
- enum bpf_map_type map_type;
- __u32 map_flags;
- __u32 key_size;
- __u32 value_size;
- __u32 max_entries;
- __u32 numa_node;
- __u32 btf_fd;
- __u32 btf_key_type_id;
- __u32 btf_value_type_id;
- __u32 map_ifindex;
- union {
- __u32 inner_map_fd;
- __u32 btf_vmlinux_value_type_id;
- };
-};
-
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
-LIBBPF_API int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
-LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size,
- int max_entries, __u32 map_flags, int node);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
-LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size,
- int max_entries, __u32 map_flags);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
-LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
- int value_size, int max_entries, __u32 map_flags);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
-LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type,
- const char *name, int key_size,
- int inner_map_fd, int max_entries,
- __u32 map_flags, int node);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_map_create() instead")
-LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type,
- const char *name, int key_size,
- int inner_map_fd, int max_entries,
- __u32 map_flags);
-
struct bpf_prog_load_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
@@ -145,54 +103,6 @@ LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
const struct bpf_insn *insns, size_t insn_cnt,
const struct bpf_prog_load_opts *opts);
-/* this "specialization" should go away in libbpf 1.0 */
-LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
- const char *prog_name, const char *license,
- const struct bpf_insn *insns, size_t insn_cnt,
- const struct bpf_prog_load_opts *opts);
-
-/* This is an elaborate way to not conflict with deprecated bpf_prog_load()
- * API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone.
- * With this approach, if someone is calling bpf_prog_load() with
- * 4 arguments, they will use the deprecated API, which keeps backwards
- * compatibility (both source code and binary). If bpf_prog_load() is called
- * with 6 arguments, though, it gets redirected to __bpf_prog_load.
- * So looking forward to libbpf 1.0 when this hack will be gone and
- * __bpf_prog_load() will be called just bpf_prog_load().
- */
-#ifndef bpf_prog_load
-#define bpf_prog_load(...) ___libbpf_overload(___bpf_prog_load, __VA_ARGS__)
-#define ___bpf_prog_load4(file, type, pobj, prog_fd) \
- bpf_prog_load_deprecated(file, type, pobj, prog_fd)
-#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \
- bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts)
-#endif /* bpf_prog_load */
-
-struct bpf_load_program_attr {
- enum bpf_prog_type prog_type;
- enum bpf_attach_type expected_attach_type;
- const char *name;
- const struct bpf_insn *insns;
- size_t insns_cnt;
- const char *license;
- union {
- __u32 kern_version;
- __u32 attach_prog_fd;
- };
- union {
- __u32 prog_ifindex;
- __u32 attach_btf_id;
- };
- __u32 prog_btf_fd;
- __u32 func_info_rec_size;
- const void *func_info;
- __u32 func_info_cnt;
- __u32 line_info_rec_size;
- const void *line_info;
- __u32 line_info_cnt;
- __u32 log_level;
- __u32 prog_flags;
-};
/* Flags to direct loading requirements */
#define MAPS_RELAX_COMPAT 0x01
@@ -200,22 +110,6 @@ struct bpf_load_program_attr {
/* Recommended log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
-LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
-LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
- const struct bpf_insn *insns, size_t insns_cnt,
- const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
-LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
- const struct bpf_insn *insns,
- size_t insns_cnt, __u32 prog_flags,
- const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz,
- int log_level);
-
struct bpf_btf_load_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
@@ -229,10 +123,6 @@ struct bpf_btf_load_opts {
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
const struct bpf_btf_load_opts *opts);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_btf_load() instead")
-LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
- __u32 log_buf_size, bool do_log);
-
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
@@ -244,6 +134,7 @@ LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
LIBBPF_API int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key,
void *value, __u64 flags);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
+LIBBPF_API int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags);
LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
LIBBPF_API int bpf_map_freeze(int fd);
@@ -379,8 +270,19 @@ LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values
__u32 *count,
const struct bpf_map_batch_opts *opts);
+struct bpf_obj_get_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ __u32 file_flags;
+
+ size_t :0;
+};
+#define bpf_obj_get_opts__last_field file_flags
+
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname);
+LIBBPF_API int bpf_obj_get_opts(const char *pathname,
+ const struct bpf_obj_get_opts *opts);
struct bpf_prog_attach_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
@@ -394,10 +296,6 @@ LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd,
enum bpf_attach_type type,
const struct bpf_prog_attach_opts *opts);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_prog_attach_opts() instead")
-LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd,
- enum bpf_attach_type type,
- const struct bpf_prog_attach_opts *opts);
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
@@ -420,6 +318,9 @@ struct bpf_link_create_opts {
const unsigned long *addrs;
const __u64 *cookies;
} kprobe_multi;
+ struct {
+ __u64 cookie;
+ } tracing;
};
size_t :0;
};
@@ -460,17 +361,6 @@ struct bpf_prog_test_run_attr {
* out: length of ctx_out */
};
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
-LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
-
-/*
- * bpf_prog_test_run does not check that data_out is large enough. Consider
- * using bpf_prog_test_run_opts instead.
- */
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead")
-LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
- __u32 size, void *data_out, __u32 *size_out,
- __u32 *retval, __u32 *duration);
LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
@@ -480,9 +370,24 @@ LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
+
+struct bpf_prog_query_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 query_flags;
+ __u32 attach_flags; /* output argument */
+ __u32 *prog_ids;
+ __u32 prog_cnt; /* input+output argument */
+ __u32 *prog_attach_flags;
+};
+#define bpf_prog_query_opts__last_field prog_attach_flags
+
+LIBBPF_API int bpf_prog_query_opts(int target_fd,
+ enum bpf_attach_type type,
+ struct bpf_prog_query_opts *opts);
LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
__u32 query_flags, __u32 *attach_flags,
__u32 *prog_ids, __u32 *prog_cnt);
+
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index e4aa9996a550..496e6a8ee0dc 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -29,6 +29,7 @@ enum bpf_type_id_kind {
enum bpf_type_info_kind {
BPF_TYPE_EXISTS = 0, /* type existence in target kernel */
BPF_TYPE_SIZE = 1, /* type size in target kernel */
+ BPF_TYPE_MATCHES = 2, /* type match in target kernel */
};
/* second argument to __builtin_preserve_enum_value() built-in */
@@ -110,21 +111,50 @@ enum bpf_enum_value_kind {
val; \
})
+#define ___bpf_field_ref1(field) (field)
+#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
+#define ___bpf_field_ref(args...) \
+ ___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
+
/*
* Convenience macro to check that a field actually exists in the target kernel.
* Returns:
* 1, if matching field is present in target kernel;
* 0, if no matching field found.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_exists(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_exists(struct my_type, my_field).
*/
-#define bpf_core_field_exists(field) \
- __builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
+#define bpf_core_field_exists(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS)
/*
* Convenience macro to get the byte size of a field. Works for integers,
* struct/unions, pointers, arrays, and enums.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_size(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_size(struct my_type, my_field).
+ */
+#define bpf_core_field_size(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE)
+
+/*
+ * Convenience macro to get field's byte offset.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_offset(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_offset(struct my_type, my_field).
*/
-#define bpf_core_field_size(field) \
- __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
+#define bpf_core_field_offset(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET)
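
The variadic forms let a program query a field without having an instance pointer in scope; both spellings emit the same CO-RE relocation. A hedged sketch, assuming task points at a struct task_struct:

    __u32 sz = 0, off = 0;

    /* Form 1: through a variable access. */
    if (bpf_core_field_exists(task->comm))
        sz = bpf_core_field_size(task->comm);

    /* Form 2: through type and field names, no instance needed. */
    if (bpf_core_field_exists(struct task_struct, comm))
        off = bpf_core_field_offset(struct task_struct, comm);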
/*
* Convenience macro to get BTF type ID of a specified type, using a local BTF
@@ -155,6 +185,16 @@ enum bpf_enum_value_kind {
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
/*
+ * Convenience macro to check that provided named type
+ * (struct/union/enum/typedef) "matches" that in a target kernel.
+ * Returns:
+ * 1, if the type matches in the target kernel's BTF;
+ * 0, if the type does not match any in the target kernel
+ */
+#define bpf_core_type_matches(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
+
+/*
* Convenience macro to get the byte size of a provided named type
* (struct/union/enum/typedef) in a target kernel.
* Returns:
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 44df982d2a5c..7349b16b8e2f 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -22,12 +22,25 @@
* To allow use of SEC() with externs (e.g., for extern .maps declarations),
* make sure __attribute__((unused)) doesn't trigger compilation warning.
*/
+#if __GNUC__ && !__clang__
+
+/*
+ * Pragma macros are broken on GCC
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
+ */
+#define SEC(name) __attribute__((section(name), used))
+
+#else
+
#define SEC(name) \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
__attribute__((section(name), used)) \
_Pragma("GCC diagnostic pop") \
+#endif
+
/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
#undef __always_inline
#define __always_inline inline __attribute__((always_inline))
@@ -76,6 +89,30 @@
#endif
/*
+ * Compiler (optimization) barrier.
+ */
+#ifndef barrier
+#define barrier() asm volatile("" ::: "memory")
+#endif
+
+/* Variable-specific compiler (optimization) barrier. It's a no-op which makes
+ * the compiler believe that there is some black box modification of a given
+ * variable and thus prevents the compiler from making extra assumptions about
+ * its value and potential simplifications and optimizations on this variable.
+ *
+ * E.g., the compiler might often delay or even omit 32-bit to 64-bit casting
+ * of a variable, making some code patterns unverifiable. Putting barrier_var()
+ * in place will ensure that the cast is performed before the barrier_var()
+ * invocation, because the compiler has to pessimistically assume that the
+ * embedded asm section might perform some extra operations on that variable.
+ *
+ * This is a variable-specific variant of the more global barrier().
+ */
+#ifndef barrier_var
+#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
+#endif
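
A typical barrier_var() use is pinning down a bounds-checked value so the verifier sees the same check the C code performed; get_len(), dst and src below are hypothetical:

    __u32 len = get_len();

    len &= 0xff;       /* bound the value */
    barrier_var(len);  /* keep the compiler from re-deriving or widening len */
    bpf_probe_read_kernel(dst, len, src);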
+
+/*
* Helper macro to throw a compilation error if __bpf_unreachable() gets
* built into the resulting code. This works given BPF back end does not
* implement __builtin_trap(). This is useful to assert that certain paths
@@ -149,6 +186,8 @@ enum libbpf_tristate {
#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
+#define __kptr __attribute__((btf_type_tag("kptr")))
+#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index e3a8c947e89f..43ca3aff2292 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -2,6 +2,8 @@
#ifndef __BPF_TRACING_H__
#define __BPF_TRACING_H__
+#include <bpf/bpf_helpers.h>
+
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
@@ -27,6 +29,9 @@
#elif defined(__TARGET_ARCH_riscv)
#define bpf_target_riscv
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_arc)
+ #define bpf_target_arc
+ #define bpf_target_defined
#else
/* Fall back to what the compiler says */
@@ -54,6 +59,9 @@
#elif defined(__riscv) && __riscv_xlen == 64
#define bpf_target_riscv
#define bpf_target_defined
+#elif defined(__arc__)
+ #define bpf_target_arc
+ #define bpf_target_defined
#endif /* no compiler target */
#endif
@@ -134,7 +142,7 @@ struct pt_regs___s390 {
#define __PT_RC_REG gprs[2]
#define __PT_SP_REG gprs[15]
#define __PT_IP_REG psw.addr
-#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2)
#elif defined(bpf_target_arm)
@@ -168,7 +176,7 @@ struct pt_regs___arm64 {
#define __PT_RC_REG regs[0]
#define __PT_SP_REG sp
#define __PT_IP_REG pc
-#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; })
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0)
#elif defined(bpf_target_mips)
@@ -227,12 +235,29 @@ struct pt_regs___arm64 {
#define __PT_PARM5_REG a4
#define __PT_RET_REG ra
#define __PT_FP_REG s0
-#define __PT_RC_REG a5
+#define __PT_RC_REG a0
#define __PT_SP_REG sp
#define __PT_IP_REG pc
/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#elif defined(bpf_target_arc)
+
+/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
+#define __PT_PARM1_REG scratch.r0
+#define __PT_PARM2_REG scratch.r1
+#define __PT_PARM3_REG scratch.r2
+#define __PT_PARM4_REG scratch.r3
+#define __PT_PARM5_REG scratch.r4
+#define __PT_RET_REG scratch.blink
+#define __PT_FP_REG __unsupported__
+#define __PT_RC_REG scratch.r0
+#define __PT_SP_REG scratch.sp
+#define __PT_IP_REG scratch.ret
+/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+
#endif
#if defined(bpf_target_defined)
@@ -470,39 +495,69 @@ typeof(name(0)) name(struct pt_regs *ctx) \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
+/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
#define ___bpf_syscall_args0() ctx
-#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
-#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
-#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
-#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
-#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
+#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
+#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
+#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
+#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
+#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
+/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
+#define ___bpf_syswrap_args0() ctx
+#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
+
/*
- * BPF_KPROBE_SYSCALL is a variant of BPF_KPROBE, which is intended for
+ * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
* tracing syscall functions, like __x64_sys_close. It hides the underlying
* platform-specific low-level way of getting syscall input arguments from
* struct pt_regs, and provides a familiar typed and named function arguments
* syntax and semantics of accessing syscall input parameters.
*
- * Original struct pt_regs* context is preserved as 'ctx' argument. This might
+ * Original struct pt_regs * context is preserved as 'ctx' argument. This might
* be necessary when using BPF helpers like bpf_perf_event_output().
*
- * This macro relies on BPF CO-RE support.
+ * At the moment BPF_KSYSCALL does not transparently handle all the calling
+ * convention quirks for the following syscalls:
+ *
+ * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
+ * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
+ * CONFIG_CLONE_BACKWARDS3.
+ * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
+ * - compat syscalls.
+ *
+ * This may or may not change in the future. User needs to take extra measures
+ * to handle such quirks explicitly, if necessary.
+ *
+ * This macro relies on BPF CO-RE support and virtual __kconfig externs.
*/
-#define BPF_KPROBE_SYSCALL(name, args...) \
+#define BPF_KSYSCALL(name, args...) \
name(struct pt_regs *ctx); \
+extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \
static __attribute__((always_inline)) typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
- struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx); \
+ struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \
+ ? (struct pt_regs *)PT_REGS_PARM1(ctx) \
+ : ctx; \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
- return ____##name(___bpf_syscall_args(args)); \
+ if (LINUX_HAS_SYSCALL_WRAPPER) \
+ return ____##name(___bpf_syswrap_args(args)); \
+ else \
+ return ____##name(___bpf_syscall_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
static __attribute__((always_inline)) typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)
+#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
+
#endif
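
Net effect: a single BPF_KSYSCALL program body works whether or not the running kernel was built with CONFIG_ARCH_HAS_SYSCALL_WRAPPER, because libbpf resolves the LINUX_HAS_SYSCALL_WRAPPER __kconfig extern at load time. A hedged sketch (the arch-specific wrapper name in SEC() is illustrative):

    SEC("kprobe/__x64_sys_close")
    int BPF_KSYSCALL(probe_close, unsigned int fd)
    {
        bpf_printk("close(%u)", fd);
        return 0;
    }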
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 1383e26c5d1f..2d14f1a52d7a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -130,7 +130,7 @@ static inline __u64 ptr_to_u64(const void *ptr)
/* Ensure given dynamically allocated memory region pointed to by *data* with
* capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
- * memory to accomodate *add_cnt* new elements, assuming *cur_cnt* elements
+ * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
* are already used. At most *max_cnt* elements can be ever allocated.
* If necessary, memory is reallocated and all existing data is copied over,
* new pointer to the memory region is stored at *data, new memory region
@@ -305,6 +305,8 @@ static int btf_type_size(const struct btf_type *t)
return base_size + sizeof(__u32);
case BTF_KIND_ENUM:
return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ENUM64:
+ return base_size + vlen * sizeof(struct btf_enum64);
case BTF_KIND_ARRAY:
return base_size + sizeof(struct btf_array);
case BTF_KIND_STRUCT:
@@ -334,6 +336,7 @@ static void btf_bswap_type_base(struct btf_type *t)
static int btf_bswap_type_rest(struct btf_type *t)
{
struct btf_var_secinfo *v;
+ struct btf_enum64 *e64;
struct btf_member *m;
struct btf_array *a;
struct btf_param *p;
@@ -361,6 +364,13 @@ static int btf_bswap_type_rest(struct btf_type *t)
e->val = bswap_32(e->val);
}
return 0;
+ case BTF_KIND_ENUM64:
+ for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
+ e64->name_off = bswap_32(e64->name_off);
+ e64->val_lo32 = bswap_32(e64->val_lo32);
+ e64->val_hi32 = bswap_32(e64->val_hi32);
+ }
+ return 0;
case BTF_KIND_ARRAY:
a = btf_array(t);
a->type = bswap_32(a->type);
@@ -438,11 +448,6 @@ static int btf_parse_type_sec(struct btf *btf)
return 0;
}
-__u32 btf__get_nr_types(const struct btf *btf)
-{
- return btf->start_id + btf->nr_types - 1;
-}
-
__u32 btf__type_cnt(const struct btf *btf)
{
return btf->start_id + btf->nr_types;
@@ -472,9 +477,22 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
static int determine_ptr_size(const struct btf *btf)
{
+ static const char * const long_aliases[] = {
+ "long",
+ "long int",
+ "int long",
+ "unsigned long",
+ "long unsigned",
+ "unsigned long int",
+ "unsigned int long",
+ "long unsigned int",
+ "long int unsigned",
+ "int unsigned long",
+ "int long unsigned",
+ };
const struct btf_type *t;
const char *name;
- int i, n;
+ int i, j, n;
if (btf->base_btf && btf->base_btf->ptr_sz > 0)
return btf->base_btf->ptr_sz;
@@ -485,15 +503,16 @@ static int determine_ptr_size(const struct btf *btf)
if (!btf_is_int(t))
continue;
+ if (t->size != 4 && t->size != 8)
+ continue;
+
name = btf__name_by_offset(btf, t->name_off);
if (!name)
continue;
- if (strcmp(name, "long int") == 0 ||
- strcmp(name, "long unsigned int") == 0) {
- if (t->size != 4 && t->size != 8)
- continue;
- return t->size;
+ for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
+ if (strcmp(name, long_aliases[j]) == 0)
+ return t->size;
}
}
@@ -597,6 +616,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_DATASEC:
case BTF_KIND_FLOAT:
size = t->size;
@@ -644,6 +664,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FLOAT:
return min(btf_ptr_sz(btf), (size_t)t->size);
case BTF_KIND_PTR:
@@ -1382,92 +1403,6 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
return btf__load_from_kernel_by_id_split(id, NULL);
}
-int btf__get_from_id(__u32 id, struct btf **btf)
-{
- struct btf *res;
- int err;
-
- *btf = NULL;
- res = btf__load_from_kernel_by_id(id);
- err = libbpf_get_error(res);
-
- if (err)
- return libbpf_err(err);
-
- *btf = res;
- return 0;
-}
-
-int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
- __u32 expected_key_size, __u32 expected_value_size,
- __u32 *key_type_id, __u32 *value_type_id)
-{
- const struct btf_type *container_type;
- const struct btf_member *key, *value;
- const size_t max_name = 256;
- char container_name[max_name];
- __s64 key_size, value_size;
- __s32 container_id;
-
- if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == max_name) {
- pr_warn("map:%s length of '____btf_map_%s' is too long\n",
- map_name, map_name);
- return libbpf_err(-EINVAL);
- }
-
- container_id = btf__find_by_name(btf, container_name);
- if (container_id < 0) {
- pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
- map_name, container_name);
- return libbpf_err(container_id);
- }
-
- container_type = btf__type_by_id(btf, container_id);
- if (!container_type) {
- pr_warn("map:%s cannot find BTF type for container_id:%u\n",
- map_name, container_id);
- return libbpf_err(-EINVAL);
- }
-
- if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
- pr_warn("map:%s container_name:%s is an invalid container struct\n",
- map_name, container_name);
- return libbpf_err(-EINVAL);
- }
-
- key = btf_members(container_type);
- value = key + 1;
-
- key_size = btf__resolve_size(btf, key->type);
- if (key_size < 0) {
- pr_warn("map:%s invalid BTF key_type_size\n", map_name);
- return libbpf_err(key_size);
- }
-
- if (expected_key_size != key_size) {
- pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
- map_name, (__u32)key_size, expected_key_size);
- return libbpf_err(-EINVAL);
- }
-
- value_size = btf__resolve_size(btf, value->type);
- if (value_size < 0) {
- pr_warn("map:%s invalid BTF value_type_size\n", map_name);
- return libbpf_err(value_size);
- }
-
- if (expected_value_size != value_size) {
- pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
- map_name, (__u32)value_size, expected_value_size);
- return libbpf_err(-EINVAL);
- }
-
- *key_type_id = key->type;
- *value_type_id = value->type;
-
- return 0;
-}
-
static void btf_invalidate_raw_data(struct btf *btf)
{
if (btf->raw_data) {
@@ -2115,20 +2050,8 @@ int btf__add_field(struct btf *btf, const char *name, int type_id,
return 0;
}
-/*
- * Append new BTF_KIND_ENUM type with:
- * - *name* - name of the enum, can be NULL or empty for anonymous enums;
- * - *byte_sz* - size of the enum, in bytes.
- *
- * Enum initially has no enum values in it (and corresponds to enum forward
- * declaration). Enumerator values can be added by btf__add_enum_value()
- * immediately after btf__add_enum() succeeds.
- *
- * Returns:
- * - >0, type ID of newly added BTF type;
- * - <0, on error.
- */
-int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
+static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
+ bool is_signed, __u8 kind)
{
struct btf_type *t;
int sz, name_off = 0;
@@ -2153,13 +2076,35 @@ int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
/* start out with vlen=0; it will be adjusted when adding enum values */
t->name_off = name_off;
- t->info = btf_type_info(BTF_KIND_ENUM, 0, 0);
+ t->info = btf_type_info(kind, 0, is_signed);
t->size = byte_sz;
return btf_commit_type(btf, sz);
}
/*
+ * Append new BTF_KIND_ENUM type with:
+ * - *name* - name of the enum, can be NULL or empty for anonymous enums;
+ * - *byte_sz* - size of the enum, in bytes.
+ *
+ * Enum initially has no enum values in it (and corresponds to enum forward
+ * declaration). Enumerator values can be added by btf__add_enum_value()
+ * immediately after btf__add_enum() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ /*
+ * set the signedness to unsigned; it will change to signed
+ * if any later enumerator is negative.
+ */
+ return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
+}
+
+/*
* Append new enum value for the current ENUM type with:
* - *name* - name of the enumerator value, can't be NULL or empty;
* - *value* - integer value corresponding to enum value *name*;
@@ -2206,6 +2151,82 @@ int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
t = btf_last_type(btf);
btf_type_inc_vlen(t);
+ /* if negative value, set signedness to signed */
+ if (value < 0)
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
+
+/*
+ * Append new BTF_KIND_ENUM64 type with:
+ * - *name* - name of the enum, can be NULL or empty for anonymous enums;
+ * - *byte_sz* - size of the enum, in bytes;
+ * - *is_signed* - whether the enum values are signed or not.
+ *
+ * Enum initially has no enum values in it (and corresponds to enum forward
+ * declaration). Enumerator values can be added by btf__add_enum64_value()
+ * immediately after btf__add_enum64() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
+ bool is_signed)
+{
+ return btf_add_enum_common(btf, name, byte_sz, is_signed,
+ BTF_KIND_ENUM64);
+}
+
+/*
+ * Append new enum value for the current ENUM64 type with:
+ * - *name* - name of the enumerator value, can't be NULL or empty;
+ * - *value* - integer value corresponding to enum value *name*;
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
+{
+ struct btf_enum64 *v;
+ struct btf_type *t;
+ int sz, name_off;
+
+ /* last type should be BTF_KIND_ENUM64 */
+ if (btf->nr_types == 0)
+ return libbpf_err(-EINVAL);
+ t = btf_last_type(btf);
+ if (!btf_is_enum64(t))
+ return libbpf_err(-EINVAL);
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_enum64);
+ v = btf_add_type_mem(btf, sz);
+ if (!v)
+ return libbpf_err(-ENOMEM);
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ v->name_off = name_off;
+ v->val_lo32 = (__u32)value;
+ v->val_hi32 = value >> 32;
+
+ /* update parent type's vlen */
+ t = btf_last_type(btf);
+ btf_type_inc_vlen(t);
+
btf->hdr->type_len += sz;
btf->hdr->str_off += sz;
return 0;
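
Building a 64-bit enum follows the same add-the-type-then-add-values pattern as the 32-bit API. A sketch with made-up names:

    int id = btf__add_enum64(btf, "big_flags", 8 /* bytes */, false);
    if (id > 0) {
        btf__add_enum64_value(btf, "FLAG_LO", 0x1ULL);
        btf__add_enum64_value(btf, "FLAG_HI", 0x100000000ULL);
    }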
@@ -2626,6 +2647,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
+ size_t sec_cnt = 0;
/* The start of the info sec (including the __u32 record_size). */
void *info;
@@ -2689,8 +2711,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return -EINVAL;
}
- total_record_size = sec_hdrlen +
- (__u64)num_records * record_size;
+ total_record_size = sec_hdrlen + (__u64)num_records * record_size;
if (info_left < total_record_size) {
pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
@@ -2699,12 +2720,14 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
info_left -= total_record_size;
sinfo = (void *)sinfo + total_record_size;
+ sec_cnt++;
}
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
ext_info->info = info + sizeof(__u32);
+ ext_info->sec_cnt = sec_cnt;
return 0;
}
@@ -2788,6 +2811,9 @@ void btf_ext__free(struct btf_ext *btf_ext)
{
if (IS_ERR_OR_NULL(btf_ext))
return;
+ free(btf_ext->func_info.sec_idxs);
+ free(btf_ext->line_info.sec_idxs);
+ free(btf_ext->core_relo_info.sec_idxs);
free(btf_ext->data);
free(btf_ext);
}
@@ -2826,10 +2852,8 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
if (err)
goto done;
- if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len)) {
- err = -EINVAL;
- goto done;
- }
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+ goto done; /* skip core relos parsing */
err = btf_ext_setup_core_relos(btf_ext);
if (err)
@@ -2850,81 +2874,6 @@ const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
return btf_ext->data;
}
-static int btf_ext_reloc_info(const struct btf *btf,
- const struct btf_ext_info *ext_info,
- const char *sec_name, __u32 insns_cnt,
- void **info, __u32 *cnt)
-{
- __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
- __u32 i, record_size, existing_len, records_len;
- struct btf_ext_info_sec *sinfo;
- const char *info_sec_name;
- __u64 remain_len;
- void *data;
-
- record_size = ext_info->rec_size;
- sinfo = ext_info->info;
- remain_len = ext_info->len;
- while (remain_len > 0) {
- records_len = sinfo->num_info * record_size;
- info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
- if (strcmp(info_sec_name, sec_name)) {
- remain_len -= sec_hdrlen + records_len;
- sinfo = (void *)sinfo + sec_hdrlen + records_len;
- continue;
- }
-
- existing_len = (*cnt) * record_size;
- data = realloc(*info, existing_len + records_len);
- if (!data)
- return libbpf_err(-ENOMEM);
-
- memcpy(data + existing_len, sinfo->data, records_len);
- /* adjust insn_off only, the rest data will be passed
- * to the kernel.
- */
- for (i = 0; i < sinfo->num_info; i++) {
- __u32 *insn_off;
-
- insn_off = data + existing_len + (i * record_size);
- *insn_off = *insn_off / sizeof(struct bpf_insn) + insns_cnt;
- }
- *info = data;
- *cnt += sinfo->num_info;
- return 0;
- }
-
- return libbpf_err(-ENOENT);
-}
-
-int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *cnt)
-{
- return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
- insns_cnt, func_info, cnt);
-}
-
-int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt)
-{
- return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
- insns_cnt, line_info, cnt);
-}
-
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
-{
- return btf_ext->func_info.rec_size;
-}
-
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
-{
- return btf_ext->line_info.rec_size;
-}
-
struct btf_dedup;
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
@@ -3074,9 +3023,7 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
* deduplicating structs/unions is described in greater details in comments for
* `btf_dedup_is_equiv` function.
*/
-
-DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
-int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
+int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
{
struct btf_dedup *d;
int err;
@@ -3136,19 +3083,6 @@ done:
return libbpf_err(err);
}
-COMPAT_VERSION(btf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
-int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
-{
- LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
-
- if (unused_opts) {
- pr_warn("please use new version of btf__dedup() that supports options\n");
- return libbpf_err(-ENOTSUP);
- }
-
- return btf__dedup(btf, &opts);
-}
-
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)
@@ -3467,7 +3401,7 @@ static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
return info1 == info2;
}
-/* Calculate type signature hash of ENUM. */
+/* Calculate type signature hash of ENUM/ENUM64. */
static long btf_hash_enum(struct btf_type *t)
{
long h;
@@ -3501,9 +3435,31 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
return true;
}
+static bool btf_equal_enum64(struct btf_type *t1, struct btf_type *t2)
+{
+ const struct btf_enum64 *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ vlen = btf_vlen(t1);
+ m1 = btf_enum64(t1);
+ m2 = btf_enum64(t2);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
+ m1->val_hi32 != m2->val_hi32)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
- return btf_is_enum(t) && btf_vlen(t) == 0;
+ return btf_is_any_enum(t) && btf_vlen(t) == 0;
}
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
@@ -3516,6 +3472,17 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
t1->size == t2->size;
}
+static bool btf_compat_enum64(struct btf_type *t1, struct btf_type *t2)
+{
+ if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
+ return btf_equal_enum64(t1, t2);
+
+ /* ignore vlen when comparing */
+ return t1->name_off == t2->name_off &&
+ (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
+ t1->size == t2->size;
+}
+
/*
* Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
* as referenced type IDs equivalence is established separately during type
@@ -3728,6 +3695,7 @@ static int btf_dedup_prep(struct btf_dedup *d)
h = btf_hash_int_decl_tag(t);
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
h = btf_hash_enum(t);
break;
case BTF_KIND_STRUCT:
@@ -3817,6 +3785,27 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
}
break;
+ case BTF_KIND_ENUM64:
+ h = btf_hash_enum(t);
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = (__u32)(long)hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
+ if (btf_equal_enum64(t, cand)) {
+ new_id = cand_id;
+ break;
+ }
+ if (btf_compat_enum64(t, cand)) {
+ if (btf_is_enum_fwd(t)) {
+ /* resolve fwd to full enum */
+ new_id = cand_id;
+ break;
+ }
+ /* resolve canonical enum fwd to full enum */
+ d->map[cand_id] = type_id;
+ }
+ }
+ break;
+
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
h = btf_hash_common(t);
@@ -4112,6 +4101,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
case BTF_KIND_ENUM:
return btf_compat_enum(cand_type, canon_type);
+ case BTF_KIND_ENUM64:
+ return btf_compat_enum64(cand_type, canon_type);
+
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
return btf_equal_common(cand_type, canon_type);
@@ -4714,6 +4706,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
return 0;
case BTF_KIND_FWD:
@@ -4808,6 +4801,16 @@ int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ct
}
break;
}
+ case BTF_KIND_ENUM64: {
+ struct btf_enum64 *m = btf_enum64(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
+ }
case BTF_KIND_FUNC_PROTO: {
struct btf_param *m = btf_params(t);
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 951ac7475794..583760df83b4 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -120,20 +120,12 @@ LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id);
LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf);
-LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead")
-LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
-LIBBPF_DEPRECATED_SINCE(0, 6, "intended for internal libbpf use only")
-LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
-LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead")
-LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API int btf__load_into_kernel(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
const char *type_name, __u32 kind);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__type_cnt() instead; note that btf__get_nr_types() == btf__type_cnt() - 1")
-LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
@@ -150,29 +142,10 @@ LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
-LIBBPF_DEPRECATED_SINCE(0, 7, "this API is not necessary when BTF-defined maps are used")
-LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
- __u32 expected_key_size,
- __u32 expected_value_size,
- __u32 *key_type_id, __u32 *value_type_id);
LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
-int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *cnt);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
-int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info is deprecated; write custom func_info parsing to fetch rec_size")
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
-LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info is deprecated; write custom line_info parsing to fetch rec_size")
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
@@ -215,6 +188,8 @@ LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_
/* enum construction APIs */
LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz);
LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value);
+LIBBPF_API int btf__add_enum64(struct btf *btf, const char *name, __u32 bytes_sz, bool is_signed);
+LIBBPF_API int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value);
enum btf_fwd_kind {
BTF_FWD_STRUCT = 0,
@@ -257,22 +232,12 @@ struct btf_dedup_opts {
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
-LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts);
-
-LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead")
-LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts);
-#define btf__dedup(...) ___libbpf_overload(___btf_dedup, __VA_ARGS__)
-#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts)
-#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts)
-
struct btf_dump;
struct btf_dump_opts {
- union {
- size_t sz;
- void *ctx; /* DEPRECATED: will be gone in v1.0 */
- };
+ size_t sz;
};
+#define btf_dump_opts__last_field sz
typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
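
With the deprecated ctx union gone, opts is a plain size-discriminated options struct; a v1.0-style usage sketch, where my_printf_cb is a hypothetical callback matching btf_dump_printf_fn_t:

    LIBBPF_OPTS(btf_dump_opts, opts);
    struct btf_dump *d = btf_dump__new(btf, my_printf_cb, NULL, &opts);

    if (!d)
        return -errno; /* libbpf sets errno on failure */
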
@@ -281,51 +246,6 @@ LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
void *ctx,
const struct btf_dump_opts *opts);
-LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
- btf_dump_printf_fn_t printf_fn,
- void *ctx,
- const struct btf_dump_opts *opts);
-
-LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const struct btf_dump_opts *opts,
- btf_dump_printf_fn_t printf_fn);
-
-/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the
- * type of 4th argument. If it's btf_dump's print callback, use deprecated
- * API; otherwise, choose the new btf_dump__new(). ___libbpf_override()
- * doesn't work here because both variants have 4 input arguments.
- *
- * (void *) casts are necessary to avoid compilation warnings about type
- * mismatches, because even though __builtin_choose_expr() only ever evaluates
- * one side the other side still has to satisfy type constraints (this is
- * compiler implementation limitation which might be lifted eventually,
- * according to the documentation). So passing struct btf_ext in place of
- * btf_dump_printf_fn_t would be generating compilation warning. Casting to
- * void * avoids this issue.
- *
- * Also, two type compatibility checks for a function and function pointer are
- * required because passing function reference into btf_dump__new() as
- * btf_dump__new(..., my_callback, ...) and as btf_dump__new(...,
- * &my_callback, ...) (not explicit ampersand in the latter case) actually
- * differs as far as __builtin_types_compatible_p() is concerned. Thus two
- * checks are combined to detect callback argument.
- *
- * The rest works just like in case of ___libbpf_override() usage with symbol
- * versioning.
- *
- * C++ compilers don't support __builtin_types_compatible_p(), so at least
- * don't screw up compilation for them and let C++ users pick btf_dump__new
- * vs btf_dump__new_deprecated explicitly.
- */
-#ifndef __cplusplus
-#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr( \
- __builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) || \
- __builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)), \
- btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4), \
- btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4))
-#endif
-
LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
@@ -393,9 +313,10 @@ btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
#ifndef BTF_KIND_FLOAT
#define BTF_KIND_FLOAT 16 /* Floating point */
#endif
-/* The kernel header switched to enums, so these two were never #defined */
+/* The kernel header switched to enums, so the following were never #defined */
#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
#define BTF_KIND_TYPE_TAG 18 /* Type Tag */
+#define BTF_KIND_ENUM64 19 /* Enum for up to 64-bit values */
static inline __u16 btf_kind(const struct btf_type *t)
{
@@ -454,6 +375,11 @@ static inline bool btf_is_enum(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_ENUM;
}
+static inline bool btf_is_enum64(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM64;
+}
+
static inline bool btf_is_fwd(const struct btf_type *t)
{
return btf_kind(t) == BTF_KIND_FWD;
@@ -524,6 +450,18 @@ static inline bool btf_is_type_tag(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_TYPE_TAG;
}
+static inline bool btf_is_any_enum(const struct btf_type *t)
+{
+ return btf_is_enum(t) || btf_is_enum64(t);
+}
+
+static inline bool btf_kind_core_compat(const struct btf_type *t1,
+ const struct btf_type *t2)
+{
+ return btf_kind(t1) == btf_kind(t2) ||
+ (btf_is_any_enum(t1) && btf_is_any_enum(t2));
+}
+
static inline __u8 btf_int_encoding(const struct btf_type *t)
{
return BTF_INT_ENCODING(*(__u32 *)(t + 1));
@@ -549,6 +487,16 @@ static inline struct btf_enum *btf_enum(const struct btf_type *t)
return (struct btf_enum *)(t + 1);
}
+static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
+{
+ return (struct btf_enum64 *)(t + 1);
+}
+
+static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
+{
+ return ((__u64)e->val_hi32 << 32) | e->val_lo32;
+}
+
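The lo32/hi32 split round-trips exactly; a short sketch with an arbitrary demo value:

    __u64 val = 0x0123456789abcdefULL;
    struct btf_enum64 e = {
        .val_lo32 = (__u32)val,         /* low 32 bits */
        .val_hi32 = (__u32)(val >> 32), /* high 32 bits */
    };
    /* btf_enum64_value(&e) reassembles exactly val */
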
static inline struct btf_member *btf_members(const struct btf_type *t)
{
return (struct btf_member *)(t + 1);
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 6b1bc1f43728..627edb5bb6de 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -144,15 +144,17 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
static int btf_dump_mark_referenced(struct btf_dump *d);
static int btf_dump_resize(struct btf_dump *d);
-DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0)
-struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
- btf_dump_printf_fn_t printf_fn,
- void *ctx,
- const struct btf_dump_opts *opts)
+struct btf_dump *btf_dump__new(const struct btf *btf,
+ btf_dump_printf_fn_t printf_fn,
+ void *ctx,
+ const struct btf_dump_opts *opts)
{
struct btf_dump *d;
int err;
+ if (!OPTS_VALID(opts, btf_dump_opts))
+ return libbpf_err_ptr(-EINVAL);
+
if (!printf_fn)
return libbpf_err_ptr(-EINVAL);
@@ -188,17 +190,6 @@ err:
return libbpf_err_ptr(err);
}
-COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4)
-struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const struct btf_dump_opts *opts,
- btf_dump_printf_fn_t printf_fn)
-{
- if (!printf_fn)
- return libbpf_err_ptr(-EINVAL);
- return btf_dump__new_v0_6_0(btf, printf_fn, opts ? opts->ctx : NULL, opts);
-}
-
static int btf_dump_resize(struct btf_dump *d)
{
int err, last_id = btf__type_cnt(d->btf) - 1;
@@ -318,6 +309,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
break;
@@ -538,6 +530,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
return 1;
}
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FWD:
/*
* non-anonymous or non-referenced enums are top-level
@@ -739,6 +732,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
tstate->emit_state = EMITTED;
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
if (top_level_def) {
btf_dump_emit_enum_def(d, id, t, 0);
btf_dump_printf(d, ";\n\n");
@@ -989,38 +983,81 @@ static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
}
-static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
- const struct btf_type *t,
- int lvl)
+static void btf_dump_emit_enum32_val(struct btf_dump *d,
+ const struct btf_type *t,
+ int lvl, __u16 vlen)
{
const struct btf_enum *v = btf_enum(t);
- __u16 vlen = btf_vlen(t);
+ bool is_signed = btf_kflag(t);
+ const char *fmt_str;
const char *name;
size_t dup_cnt;
int i;
+ for (i = 0; i < vlen; i++, v++) {
+ name = btf_name_of(d, v->name_off);
+ /* enumerators share namespace with typedef idents */
+ dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
+ if (dup_cnt > 1) {
+ fmt_str = is_signed ? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,";
+ btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, v->val);
+ } else {
+ fmt_str = is_signed ? "\n%s%s = %d," : "\n%s%s = %u,";
+ btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, v->val);
+ }
+ }
+}
+
+static void btf_dump_emit_enum64_val(struct btf_dump *d,
+ const struct btf_type *t,
+ int lvl, __u16 vlen)
+{
+ const struct btf_enum64 *v = btf_enum64(t);
+ bool is_signed = btf_kflag(t);
+ const char *fmt_str;
+ const char *name;
+ size_t dup_cnt;
+ __u64 val;
+ int i;
+
+ for (i = 0; i < vlen; i++, v++) {
+ name = btf_name_of(d, v->name_off);
+ dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
+ val = btf_enum64_value(v);
+ if (dup_cnt > 1) {
+ fmt_str = is_signed ? "\n%s%s___%zd = %lldLL,"
+ : "\n%s%s___%zd = %lluULL,";
+ btf_dump_printf(d, fmt_str,
+ pfx(lvl + 1), name, dup_cnt,
+ (unsigned long long)val);
+ } else {
+ fmt_str = is_signed ? "\n%s%s = %lldLL,"
+ : "\n%s%s = %lluULL,";
+ btf_dump_printf(d, fmt_str,
+ pfx(lvl + 1), name,
+ (unsigned long long)val);
+ }
+ }
+}
+static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t,
+ int lvl)
+{
+ __u16 vlen = btf_vlen(t);
+
btf_dump_printf(d, "enum%s%s",
t->name_off ? " " : "",
btf_dump_type_name(d, id));
- if (vlen) {
- btf_dump_printf(d, " {");
- for (i = 0; i < vlen; i++, v++) {
- name = btf_name_of(d, v->name_off);
- /* enumerators share namespace with typedef idents */
- dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
- if (dup_cnt > 1) {
- btf_dump_printf(d, "\n%s%s___%zu = %u,",
- pfx(lvl + 1), name, dup_cnt,
- (__u32)v->val);
- } else {
- btf_dump_printf(d, "\n%s%s = %u,",
- pfx(lvl + 1), name,
- (__u32)v->val);
- }
- }
- btf_dump_printf(d, "\n%s}", pfx(lvl));
- }
+ if (!vlen)
+ return;
+
+ btf_dump_printf(d, " {");
+ if (btf_is_enum(t))
+ btf_dump_emit_enum32_val(d, t, lvl, vlen);
+ else
+ btf_dump_emit_enum64_val(d, t, lvl, vlen);
+ btf_dump_printf(d, "\n%s}", pfx(lvl));
}
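
For a 64-bit unsigned enum, the emitter above produces C along these lines (illustrative type, not taken from this patch):

    enum big_vals {
        SMALL = 1ULL,
        HUGE = 1099511627776ULL, /* 1ULL << 40, printed via the %lluULL format */
    };
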
static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
@@ -1178,6 +1215,7 @@ skip_mod:
break;
case BTF_KIND_INT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FWD:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
@@ -1312,6 +1350,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
btf_dump_emit_struct_fwd(d, id, t);
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
btf_dump_emit_mods(d, decls);
/* inline anonymous enum */
if (t->name_off == 0 && !d->skip_anon_defs)
@@ -1988,7 +2027,8 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
__u32 id,
__s64 *value)
{
- /* handle unaligned enum value */
+ bool is_signed = btf_kflag(t);
+
if (!ptr_is_aligned(d->btf, id, data)) {
__u64 val;
int err;
@@ -2005,13 +2045,13 @@ static int btf_dump_get_enum_value(struct btf_dump *d,
*value = *(__s64 *)data;
return 0;
case 4:
- *value = *(__s32 *)data;
+ *value = is_signed ? (__s64)*(__s32 *)data : *(__u32 *)data;
return 0;
case 2:
- *value = *(__s16 *)data;
+ *value = is_signed ? *(__s16 *)data : *(__u16 *)data;
return 0;
case 1:
- *value = *(__s8 *)data;
+ *value = is_signed ? *(__s8 *)data : *(__u8 *)data;
return 0;
default:
pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id);
@@ -2024,7 +2064,7 @@ static int btf_dump_enum_data(struct btf_dump *d,
__u32 id,
const void *data)
{
- const struct btf_enum *e;
+ bool is_signed;
__s64 value;
int i, err;
@@ -2032,14 +2072,31 @@ static int btf_dump_enum_data(struct btf_dump *d,
if (err)
return err;
- for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
- if (value != e->val)
- continue;
- btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
- return 0;
- }
+ is_signed = btf_kflag(t);
+ if (btf_is_enum(t)) {
+ const struct btf_enum *e;
+
+ for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
+ if (value != e->val)
+ continue;
+ btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
+ return 0;
+ }
- btf_dump_type_values(d, "%d", value);
+ btf_dump_type_values(d, is_signed ? "%d" : "%u", value);
+ } else {
+ const struct btf_enum64 *e;
+
+ for (i = 0, e = btf_enum64(t); i < btf_vlen(t); i++, e++) {
+ if (value != btf_enum64_value(e))
+ continue;
+ btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
+ return 0;
+ }
+
+ btf_dump_type_values(d, is_signed ? "%lldLL" : "%lluULL",
+ (unsigned long long)value);
+ }
return 0;
}
@@ -2099,6 +2156,7 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
case BTF_KIND_FLOAT:
case BTF_KIND_PTR:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
if (data + bits_offset / 8 + size > d->typed_dump->data_end)
return -E2BIG;
break;
@@ -2203,6 +2261,7 @@ static int btf_dump_type_data_check_zero(struct btf_dump *d,
return -ENODATA;
}
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
err = btf_dump_get_enum_value(d, t, data, id, &value);
if (err)
return err;
@@ -2275,6 +2334,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
err = btf_dump_struct_data(d, t, id, data);
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
/* handle bitfield and int enum values */
if (bit_sz) {
__u64 print_num;
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index 927745b08014..23f5c46708f8 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -533,7 +533,7 @@ void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
gen->attach_kind = kind;
ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
prefix, attach_name);
- if (ret == sizeof(gen->attach_target))
+ if (ret >= sizeof(gen->attach_target))
gen->error = -ENOSPC;
}
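
The comparison changes because snprintf() returns the length the untruncated string would have had; a sketch:

    char buf[4];
    int ret = snprintf(buf, sizeof(buf), "%s", "kprobe/");
    /* ret == 7 and buf == "kpr": ret >= sizeof(buf) catches truncation
     * that the old ret == sizeof(buf) test would have missed */
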
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 809fe209cdcc..50d41815f431 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -31,7 +31,6 @@
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
-#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
@@ -72,6 +71,135 @@
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
+static const char * const attach_type_name[] = {
+ [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
+ [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
+ [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
+ [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
+ [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
+ [BPF_CGROUP_DEVICE] = "cgroup_device",
+ [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
+ [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
+ [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
+ [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
+ [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
+ [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
+ [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
+ [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
+ [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
+ [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
+ [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
+ [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
+ [BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
+ [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
+ [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
+ [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
+ [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
+ [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
+ [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
+ [BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
+ [BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
+ [BPF_LIRC_MODE2] = "lirc_mode2",
+ [BPF_FLOW_DISSECTOR] = "flow_dissector",
+ [BPF_TRACE_RAW_TP] = "trace_raw_tp",
+ [BPF_TRACE_FENTRY] = "trace_fentry",
+ [BPF_TRACE_FEXIT] = "trace_fexit",
+ [BPF_MODIFY_RETURN] = "modify_return",
+ [BPF_LSM_MAC] = "lsm_mac",
+ [BPF_LSM_CGROUP] = "lsm_cgroup",
+ [BPF_SK_LOOKUP] = "sk_lookup",
+ [BPF_TRACE_ITER] = "trace_iter",
+ [BPF_XDP_DEVMAP] = "xdp_devmap",
+ [BPF_XDP_CPUMAP] = "xdp_cpumap",
+ [BPF_XDP] = "xdp",
+ [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
+ [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
+ [BPF_PERF_EVENT] = "perf_event",
+ [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
+};
+
+static const char * const link_type_name[] = {
+ [BPF_LINK_TYPE_UNSPEC] = "unspec",
+ [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
+ [BPF_LINK_TYPE_TRACING] = "tracing",
+ [BPF_LINK_TYPE_CGROUP] = "cgroup",
+ [BPF_LINK_TYPE_ITER] = "iter",
+ [BPF_LINK_TYPE_NETNS] = "netns",
+ [BPF_LINK_TYPE_XDP] = "xdp",
+ [BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
+ [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
+ [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
+};
+
+static const char * const map_type_name[] = {
+ [BPF_MAP_TYPE_UNSPEC] = "unspec",
+ [BPF_MAP_TYPE_HASH] = "hash",
+ [BPF_MAP_TYPE_ARRAY] = "array",
+ [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
+ [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
+ [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
+ [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
+ [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
+ [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
+ [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
+ [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
+ [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
+ [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
+ [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
+ [BPF_MAP_TYPE_DEVMAP] = "devmap",
+ [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
+ [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
+ [BPF_MAP_TYPE_CPUMAP] = "cpumap",
+ [BPF_MAP_TYPE_XSKMAP] = "xskmap",
+ [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
+ [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
+ [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
+ [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
+ [BPF_MAP_TYPE_QUEUE] = "queue",
+ [BPF_MAP_TYPE_STACK] = "stack",
+ [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
+ [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
+ [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
+ [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
+ [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
+};
+
+static const char * const prog_type_name[] = {
+ [BPF_PROG_TYPE_UNSPEC] = "unspec",
+ [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
+ [BPF_PROG_TYPE_KPROBE] = "kprobe",
+ [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
+ [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
+ [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
+ [BPF_PROG_TYPE_XDP] = "xdp",
+ [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
+ [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
+ [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
+ [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
+ [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
+ [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
+ [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
+ [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
+ [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
+ [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
+ [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
+ [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
+ [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
+ [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
+ [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
+ [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
+ [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
+ [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
+ [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
+ [BPF_PROG_TYPE_TRACING] = "tracing",
+ [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_PROG_TYPE_EXT] = "ext",
+ [BPF_PROG_TYPE_LSM] = "lsm",
+ [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
+ [BPF_PROG_TYPE_SYSCALL] = "syscall",
+};
+
static int __base_pr(enum libbpf_print_level level, const char *format,
va_list args)
{
@@ -151,12 +279,9 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
-/* this goes away in libbpf 1.0 */
-enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE;
-
int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
- libbpf_mode = mode;
+ /* as of v1.0 libbpf_set_strict_mode() is a no-op */
return 0;
}
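
Existing callers keep compiling; a sketch of the now-inert call:

    libbpf_set_strict_mode(LIBBPF_STRICT_ALL); /* returns 0; v1.0 is strict regardless */
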
@@ -219,12 +344,8 @@ enum sec_def_flags {
SEC_ATTACH_BTF = 4,
/* BPF program type allows sleeping/blocking in kernel */
SEC_SLEEPABLE = 8,
- /* allow non-strict prefix matching */
- SEC_SLOPPY_PFX = 16,
/* BPF program support non-linear XDP buffer */
- SEC_XDP_FRAGS = 32,
- /* deprecated sec definitions not supposed to be used */
- SEC_DEPRECATED = 64,
+ SEC_XDP_FRAGS = 16,
};
struct bpf_sec_def {
@@ -244,9 +365,10 @@ struct bpf_sec_def {
* linux/filter.h.
*/
struct bpf_program {
- const struct bpf_sec_def *sec_def;
+ char *name;
char *sec_name;
size_t sec_idx;
+ const struct bpf_sec_def *sec_def;
/* this program's instruction offset (in number of instructions)
* within its containing ELF section
*/
@@ -266,12 +388,6 @@ struct bpf_program {
*/
size_t sub_insn_off;
- char *name;
- /* name with / replaced by _; makes recursive pinning
- * in bpf_object__pin_programs easier
- */
- char *pin_name;
-
/* instructions that belong to BPF program; insns[0] is located at
* sec_insn_off instruction within its ELF section in ELF file, so
* when mapping ELF file instruction index to the local instruction,
@@ -292,24 +408,19 @@ struct bpf_program {
size_t log_size;
__u32 log_level;
- struct {
- int nr;
- int *fds;
- } instances;
- bpf_program_prep_t preprocessor;
-
struct bpf_object *obj;
- void *priv;
- bpf_program_clear_priv_t clear_priv;
- bool load;
+ int fd;
+ bool autoload;
bool mark_btf_static;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
+
int prog_ifindex;
__u32 attach_btf_obj_fd;
__u32 attach_btf_id;
__u32 attach_prog_fd;
+
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_cnt;
@@ -356,7 +467,16 @@ enum libbpf_map_type {
LIBBPF_MAP_KCONFIG,
};
+struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+ unsigned int map_flags;
+};
+
struct bpf_map {
+ struct bpf_object *obj;
char *name;
/* real_name is defined for special internal maps (.rodata*,
* .data*, .bss, .kconfig) and preserves their original ELF section
@@ -375,8 +495,6 @@ struct bpf_map {
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 btf_vmlinux_value_type_id;
- void *priv;
- bpf_map_clear_priv_t clear_priv;
enum libbpf_map_type libbpf_type;
void *mmaped;
struct bpf_struct_ops *st_ops;
@@ -386,7 +504,7 @@ struct bpf_map {
char *pin_path;
bool pinned;
bool reused;
- bool skipped;
+ bool autocreate;
__u64 map_extra;
};
@@ -439,8 +557,6 @@ struct extern_desc {
};
};
-static LIST_HEAD(bpf_objects_list);
-
struct module_btf {
struct btf *btf;
char *name;
@@ -483,6 +599,8 @@ struct elf_state {
int st_ops_shndx;
};
+struct usdt_manager;
+
struct bpf_object {
char name[BPF_OBJ_NAME_LEN];
char license[64];
@@ -507,12 +625,6 @@ struct bpf_object {
/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
struct elf_state efile;
- /*
- * All loaded bpf_object are linked in a list, which is
- * hidden to caller. bpf_objects__<func> handlers deal with
- * all objects.
- */
- struct list_head list;
struct btf *btf;
struct btf_ext *btf_ext;
@@ -538,13 +650,12 @@ struct bpf_object {
size_t log_size;
__u32 log_level;
- void *priv;
- bpf_object_clear_priv_t clear_priv;
-
int *fd_array;
size_t fd_array_cap;
size_t fd_array_cnt;
+ struct usdt_manager *usdt_man;
+
char path[];
};
@@ -560,25 +671,10 @@ static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
void bpf_program__unload(struct bpf_program *prog)
{
- int i;
-
if (!prog)
return;
- /*
- * If the object is opened but the program was never loaded,
- * it is possible that prog->instances.nr == -1.
- */
- if (prog->instances.nr > 0) {
- for (i = 0; i < prog->instances.nr; i++)
- zclose(prog->instances.fds[i]);
- } else if (prog->instances.nr != -1) {
- pr_warn("Internal error: instances.nr is %d\n",
- prog->instances.nr);
- }
-
- prog->instances.nr = -1;
- zfree(&prog->instances.fds);
+ zclose(prog->fd);
zfree(&prog->func_info);
zfree(&prog->line_info);
@@ -589,16 +685,9 @@ static void bpf_program__exit(struct bpf_program *prog)
if (!prog)
return;
- if (prog->clear_priv)
- prog->clear_priv(prog, prog->priv);
-
- prog->priv = NULL;
- prog->clear_priv = NULL;
-
bpf_program__unload(prog);
zfree(&prog->name);
zfree(&prog->sec_name);
- zfree(&prog->pin_name);
zfree(&prog->insns);
zfree(&prog->reloc_desc);
@@ -607,26 +696,6 @@ static void bpf_program__exit(struct bpf_program *prog)
prog->sec_idx = -1;
}
-static char *__bpf_program__pin_name(struct bpf_program *prog)
-{
- char *name, *p;
-
- if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
- name = strdup(prog->name);
- else
- name = strdup(prog->sec_name);
-
- if (!name)
- return NULL;
-
- p = name;
-
- while ((p = strchr(p, '/')))
- *p = '_';
-
- return name;
-}
-
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
return BPF_CLASS(insn->code) == BPF_JMP &&
@@ -668,10 +737,19 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
prog->insns_cnt = prog->sec_insn_cnt;
prog->type = BPF_PROG_TYPE_UNSPEC;
- prog->load = true;
+ prog->fd = -1;
- prog->instances.fds = NULL;
- prog->instances.nr = -1;
+ /* libbpf's convention for SEC("?abc...") is that it's just like
+ * SEC("abc...") but the corresponding bpf_program starts out with
+ * autoload set to false.
+ */
+ if (sec_name[0] == '?') {
+ prog->autoload = false;
+ /* from now on, forget there was a '?' in the section name */
+ sec_name++;
+ } else {
+ prog->autoload = true;
+ }
/* inherit object's log_level */
prog->log_level = obj->log_level;
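
A sketch of the '?' convention end to end (hypothetical program and section names):

    /* BPF side: parsed as "tp/...", but autoload starts out false */
    SEC("?tp/syscalls/sys_enter_openat")
    int maybe_trace(void *ctx) { return 0; }

    /* user space: opt the program back in before bpf_object__load() */
    bpf_program__set_autoload(prog, true);
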
@@ -684,10 +762,6 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
if (!prog->name)
goto errout;
- prog->pin_name = __bpf_program__pin_name(prog);
- if (!prog->pin_name)
- goto errout;
-
prog->insns = malloc(insn_data_sz);
if (!prog->insns)
goto errout;
@@ -1169,7 +1243,6 @@ static struct bpf_object *bpf_object__new(const char *path,
size_t obj_buf_sz,
const char *obj_name)
{
- bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
struct bpf_object *obj;
char *end;
@@ -1207,9 +1280,6 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->kern_version = get_kernel_version();
obj->loaded = false;
- INIT_LIST_HEAD(&obj->list);
- if (!strict)
- list_add(&obj->list, &bpf_objects_list);
return obj;
}
@@ -1218,10 +1288,8 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
if (!obj->efile.elf)
return;
- if (obj->efile.elf) {
- elf_end(obj->efile.elf);
- obj->efile.elf = NULL;
- }
+ elf_end(obj->efile.elf);
+ obj->efile.elf = NULL;
obj->efile.symbols = NULL;
obj->efile.st_ops_data = NULL;
@@ -1244,10 +1312,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
}
if (obj->efile.obj_buf_sz > 0) {
- /*
- * obj_buf should have been validated by
- * bpf_object__open_buffer().
- */
+ /* obj_buf should have been validated by bpf_object__open_mem(). */
elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
} else {
obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
@@ -1397,8 +1462,11 @@ static int find_elf_var_offset(const struct bpf_object *obj, const char *name, _
for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
Elf64_Sym *sym = elf_sym_by_idx(obj, si);
- if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL ||
- ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
+ if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
+ continue;
+
+ if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
+ ELF64_ST_BIND(sym->st_info) != STB_WEAK)
continue;
sname = elf_sym_str(obj, sym->st_name);
@@ -1417,36 +1485,21 @@ static int find_elf_var_offset(const struct bpf_object *obj, const char *name, _
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
- struct bpf_map *new_maps;
- size_t new_cap;
- int i;
-
- if (obj->nr_maps < obj->maps_cap)
- return &obj->maps[obj->nr_maps++];
+ struct bpf_map *map;
+ int err;
- new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
- new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
- if (!new_maps) {
- pr_warn("alloc maps for object failed\n");
- return ERR_PTR(-ENOMEM);
- }
+ err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
+ sizeof(*obj->maps), obj->nr_maps + 1);
+ if (err)
+ return ERR_PTR(err);
- obj->maps_cap = new_cap;
- obj->maps = new_maps;
+ map = &obj->maps[obj->nr_maps++];
+ map->obj = obj;
+ map->fd = -1;
+ map->inner_map_fd = -1;
+ map->autocreate = true;
- /* zero out new maps */
- memset(obj->maps + obj->nr_maps, 0,
- (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
- /*
- * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin)
- * when failure (zclose won't close negative fd)).
- */
- for (i = obj->nr_maps; i < obj->maps_cap; i++) {
- obj->maps[i].fd = -1;
- obj->maps[i].inner_map_fd = -1;
- }
-
- return &obj->maps[obj->nr_maps++];
+ return map;
}
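
libbpf_ensure_mem() (an internal helper) grows the array geometrically and zero-fills the newly added tail, which is what lets the manual memset and fd-poisoning loop go away; the general pattern, sketched:

    size_t cap = 0, cnt = 0;
    struct bpf_map *arr = NULL;

    if (libbpf_ensure_mem((void **)&arr, &cap, sizeof(*arr), cnt + 1))
        return ERR_PTR(-ENOMEM);
    arr[cnt++].fd = -1; /* new slots arrive zeroed; poison fds explicitly */
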
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
@@ -1641,7 +1694,7 @@ static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
switch (ext->kcfg.type) {
case KCFG_BOOL:
if (value == 'm') {
- pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
+ pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
ext->name, value);
return -EINVAL;
}
@@ -1662,7 +1715,7 @@ static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
case KCFG_INT:
case KCFG_CHAR_ARR:
default:
- pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
+ pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
ext->name, value);
return -EINVAL;
}
@@ -1676,7 +1729,8 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
size_t len;
if (ext->kcfg.type != KCFG_CHAR_ARR) {
- pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
+ pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
+ ext->name, value);
return -EINVAL;
}
@@ -1690,7 +1744,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
/* strip quotes */
len -= 2;
if (len >= ext->kcfg.sz) {
- pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
+ pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
ext->name, value, len, ext->kcfg.sz - 1);
len = ext->kcfg.sz - 1;
}
@@ -1747,13 +1801,20 @@ static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
__u64 value)
{
- if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
- pr_warn("extern (kcfg) %s=%llu should be integer\n",
+ if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
+ ext->kcfg.type != KCFG_BOOL) {
+ pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
ext->name, (unsigned long long)value);
return -EINVAL;
}
+ if (ext->kcfg.type == KCFG_BOOL && value > 1) {
+ pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
+ ext->name, (unsigned long long)value);
+ return -EINVAL;
+	}
if (!is_kcfg_value_in_range(ext, value)) {
- pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
+ pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
ext->name, (unsigned long long)value, ext->kcfg.sz);
return -ERANGE;
}
@@ -1817,16 +1878,19 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj,
/* assume integer */
err = parse_u64(value, &num);
if (err) {
- pr_warn("extern (kcfg) %s=%s should be integer\n",
- ext->name, value);
+ pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
return err;
}
+ if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
+ pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
+ return -EINVAL;
+ }
err = set_kcfg_value_num(ext, ext_val, num);
break;
}
if (err)
return err;
- pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
+ pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
return 0;
}
@@ -1922,143 +1986,6 @@ static int bpf_object__init_kconfig_map(struct bpf_object *obj)
return 0;
}
-static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
-{
- Elf_Data *symbols = obj->efile.symbols;
- int i, map_def_sz = 0, nr_maps = 0, nr_syms;
- Elf_Data *data = NULL;
- Elf_Scn *scn;
-
- if (obj->efile.maps_shndx < 0)
- return 0;
-
- if (libbpf_mode & LIBBPF_STRICT_MAP_DEFINITIONS) {
- pr_warn("legacy map definitions in SEC(\"maps\") are not supported\n");
- return -EOPNOTSUPP;
- }
-
- if (!symbols)
- return -EINVAL;
-
- scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
- data = elf_sec_data(obj, scn);
- if (!scn || !data) {
- pr_warn("elf: failed to get legacy map definitions for %s\n",
- obj->path);
- return -EINVAL;
- }
-
- /*
- * Count number of maps. Each map has a name.
- * Array of maps is not supported: only the first element is
- * considered.
- *
- * TODO: Detect array of map and report error.
- */
- nr_syms = symbols->d_size / sizeof(Elf64_Sym);
- for (i = 0; i < nr_syms; i++) {
- Elf64_Sym *sym = elf_sym_by_idx(obj, i);
-
- if (sym->st_shndx != obj->efile.maps_shndx)
- continue;
- if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
- continue;
- nr_maps++;
- }
- /* Assume equally sized map definitions */
- pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
- nr_maps, data->d_size, obj->path);
-
- if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
- pr_warn("elf: unable to determine legacy map definition size in %s\n",
- obj->path);
- return -EINVAL;
- }
- map_def_sz = data->d_size / nr_maps;
-
- /* Fill obj->maps using data in "maps" section. */
- for (i = 0; i < nr_syms; i++) {
- Elf64_Sym *sym = elf_sym_by_idx(obj, i);
- const char *map_name;
- struct bpf_map_def *def;
- struct bpf_map *map;
-
- if (sym->st_shndx != obj->efile.maps_shndx)
- continue;
- if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
- continue;
-
- map = bpf_object__add_map(obj);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- map_name = elf_sym_str(obj, sym->st_name);
- if (!map_name) {
- pr_warn("failed to get map #%d name sym string for obj %s\n",
- i, obj->path);
- return -LIBBPF_ERRNO__FORMAT;
- }
-
- pr_warn("map '%s' (legacy): legacy map definitions are deprecated, use BTF-defined maps instead\n", map_name);
-
- if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
- pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
- return -ENOTSUP;
- }
-
- map->libbpf_type = LIBBPF_MAP_UNSPEC;
- map->sec_idx = sym->st_shndx;
- map->sec_offset = sym->st_value;
- pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
- map_name, map->sec_idx, map->sec_offset);
- if (sym->st_value + map_def_sz > data->d_size) {
- pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
- obj->path, map_name);
- return -EINVAL;
- }
-
- map->name = strdup(map_name);
- if (!map->name) {
- pr_warn("map '%s': failed to alloc map name\n", map_name);
- return -ENOMEM;
- }
- pr_debug("map %d is \"%s\"\n", i, map->name);
- def = (struct bpf_map_def *)(data->d_buf + sym->st_value);
- /*
- * If the definition of the map in the object file fits in
- * bpf_map_def, copy it. Any extra fields in our version
- * of bpf_map_def will default to zero as a result of the
- * calloc above.
- */
- if (map_def_sz <= sizeof(struct bpf_map_def)) {
- memcpy(&map->def, def, map_def_sz);
- } else {
- /*
- * Here the map structure being read is bigger than what
- * we expect, truncate if the excess bits are all zero.
- * If they are not zero, reject this map as
- * incompatible.
- */
- char *b;
-
- for (b = ((char *)def) + sizeof(struct bpf_map_def);
- b < ((char *)def) + map_def_sz; b++) {
- if (*b != 0) {
- pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
- obj->path, map_name);
- if (strict)
- return -EINVAL;
- }
- }
- memcpy(&map->def, def, sizeof(struct bpf_map_def));
- }
-
- /* btf info may not exist but fill it in if it does exist */
- (void) bpf_map_find_btf_info(obj, map);
- }
- return 0;
-}
-
const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
@@ -2112,6 +2039,7 @@ static const char *__btf_kind_str(__u16 kind)
case BTF_KIND_FLOAT: return "float";
case BTF_KIND_DECL_TAG: return "decl_tag";
case BTF_KIND_TYPE_TAG: return "type_tag";
+ case BTF_KIND_ENUM64: return "enum64";
default: return "unknown";
}
}
@@ -2175,6 +2103,13 @@ static int build_map_pin_path(struct bpf_map *map, const char *path)
return bpf_map__set_pin_path(map, buf);
}
+/* should match definition in bpf_helpers.h */
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
int parse_btf_map_def(const char *map_name, struct btf *btf,
const struct btf_type *def_t, bool strict,
struct btf_map_def *map_def, struct btf_map_def *inner_def)
@@ -2396,6 +2331,37 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
return 0;
}
+static size_t adjust_ringbuf_sz(size_t sz)
+{
+ __u32 page_sz = sysconf(_SC_PAGE_SIZE);
+ __u32 mul;
+
+ /* if user forgot to set any size, make sure they see an error */
+ if (sz == 0)
+ return 0;
+ /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
+ * a power-of-2 multiple of kernel's page size. If user diligently
+ * satisfied these conditions, pass the size through.
+ */
+ if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
+ return sz;
+
+ /* Otherwise, find the closest (page_sz * 2^n) product bigger than the
+ * user-set size, satisfying both the user's request and the kernel's
+ * requirements, and substitute it as max_entries at map creation.
+ */
+ for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
+ if (mul * page_sz > sz)
+ return mul * page_sz;
+ }
+
+ /* if it's impossible to satisfy the conditions (i.e., user size is
+ * very close to UINT_MAX but is not a power-of-2 multiple of
+ * page_size), then just return the original size and let the kernel reject it
+ */
+ return sz;
+}
+
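A worked pass through the rounding, assuming 4 KiB pages:

    adjust_ringbuf_sz(5000);  /* -> 8192: next page_sz * 2^n above the request */
    adjust_ringbuf_sz(16384); /* -> 16384: already 4 * 4096, and 4 is a power of 2 */
    adjust_ringbuf_sz(0);     /* -> 0: passed through so the kernel reports the error */
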
static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
{
map->def.type = def->map_type;
@@ -2409,6 +2375,10 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
map->btf_key_type_id = def->key_type_id;
map->btf_value_type_id = def->value_type_id;
+ /* auto-adjust BPF ringbuf map max_entries to be a power-of-2 multiple of page size */
+ if (map->def.type == BPF_MAP_TYPE_RINGBUF)
+ map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
+
if (def->parts & MAP_DEF_MAP_TYPE)
pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
@@ -2607,12 +2577,11 @@ static int bpf_object__init_maps(struct bpf_object *obj,
{
const char *pin_root_path;
bool strict;
- int err;
+ int err = 0;
strict = !OPTS_GET(opts, relaxed_maps, false);
pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
- err = bpf_object__init_user_maps(obj, strict);
err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
err = err ?: bpf_object__init_global_data_maps(obj);
err = err ?: bpf_object__init_kconfig_map(obj);
@@ -2640,12 +2609,13 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
+ bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
return !has_func || !has_datasec || !has_func_global || !has_float ||
- !has_decl_tag || !has_type_tag;
+ !has_decl_tag || !has_type_tag || !has_enum64;
}
-static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
+static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
@@ -2653,6 +2623,8 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
+ bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ int enum64_placeholder_id = 0;
struct btf_type *t;
int i, j, vlen;
@@ -2715,8 +2687,32 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
/* replace TYPE_TAG with a CONST */
t->name_off = 0;
t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
- }
+ } else if (!has_enum64 && btf_is_enum(t)) {
+ /* clear the kflag */
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
+ } else if (!has_enum64 && btf_is_enum64(t)) {
+ /* replace ENUM64 with a union */
+ struct btf_member *m;
+
+ if (enum64_placeholder_id == 0) {
+ enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
+ if (enum64_placeholder_id < 0)
+ return enum64_placeholder_id;
+
+ t = (struct btf_type *)btf__type_by_id(btf, i);
+ }
+
+ m = btf_members(t);
+ vlen = btf_vlen(t);
+ t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
+ for (j = 0; j < vlen; j++, m++) {
+ m->type = enum64_placeholder_id;
+ m->offset = 0;
+ }
+ }
}
+
+ return 0;
}
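
Conceptually, the downgrade turns a 64-bit enum into a same-sized union whose members all reference the 1-byte placeholder int; an illustration with a made-up type:

    /* enum64-capable kernel sees:  enum big { HUGE = 1099511627776, }     (size 8)
     * older kernel instead sees:   union big { enum64_placeholder HUGE; } (size 8)
     * names and overall size survive, so the BTF remains loadable. */
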
static bool libbpf_needs_btf(const struct bpf_object *obj)
@@ -2749,6 +2745,9 @@ static int bpf_object__init_btf(struct bpf_object *obj,
btf__set_pointer_size(obj->btf, 8);
}
if (btf_ext_data) {
+ struct btf_ext_info *ext_segs[3];
+ int seg_num, sec_num;
+
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
@@ -2762,6 +2761,43 @@ static int bpf_object__init_btf(struct bpf_object *obj,
obj->btf_ext = NULL;
goto out;
}
+
+ /* setup .BTF.ext to ELF section mapping */
+ ext_segs[0] = &obj->btf_ext->func_info;
+ ext_segs[1] = &obj->btf_ext->line_info;
+ ext_segs[2] = &obj->btf_ext->core_relo_info;
+ for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
+ struct btf_ext_info *seg = ext_segs[seg_num];
+ const struct btf_ext_info_sec *sec;
+ const char *sec_name;
+ Elf_Scn *scn;
+
+ if (seg->sec_cnt == 0)
+ continue;
+
+ seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
+ if (!seg->sec_idxs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sec_num = 0;
+ for_each_btf_ext_sec(seg, sec) {
+ /* preemptively increment index to avoid doing
+ * this before every continue below
+ */
+ sec_num++;
+
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (str_is_empty(sec_name))
+ continue;
+ scn = elf_sec_by_name(obj, sec_name);
+ if (!scn)
+ continue;
+
+ seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
+ }
+ }
}
out:
if (err && libbpf_needs_btf(obj)) {
@@ -2863,11 +2899,6 @@ static int btf_finalize_data(struct bpf_object *obj, struct btf *btf)
return libbpf_err(err);
}
-int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
-{
- return btf_finalize_data(obj, btf);
-}
-
static int bpf_object__finalize_btf(struct bpf_object *obj)
{
int err;
@@ -2920,7 +2951,7 @@ static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
}
bpf_object__for_each_program(prog, obj) {
- if (!prog->load)
+ if (!prog->autoload)
continue;
if (prog_needs_vmlinux_btf(prog))
return true;
@@ -3014,7 +3045,9 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
/* enforce 8-byte pointers for BPF-targeted BTFs */
btf__set_pointer_size(obj->btf, 8);
- bpf_object__sanitize_btf(obj, kern_btf);
+ err = bpf_object__sanitize_btf(obj, kern_btf);
+ if (err)
+ return err;
}
if (obj->gen_loader) {
@@ -3521,6 +3554,10 @@ static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
if (strcmp(name, "libbpf_tristate"))
return KCFG_UNKNOWN;
return KCFG_TRISTATE;
+ case BTF_KIND_ENUM64:
+ if (strcmp(name, "libbpf_tristate"))
+ return KCFG_UNKNOWN;
+ return KCFG_TRISTATE;
case BTF_KIND_ARRAY:
if (btf_array(t)->nelems == 0)
return KCFG_UNKNOWN;
@@ -3696,7 +3733,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
&ext->kcfg.is_signed);
if (ext->kcfg.type == KCFG_UNKNOWN) {
- pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
+ pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
return -ENOTSUP;
}
} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
@@ -3818,41 +3855,8 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return 0;
}
-struct bpf_program *
-bpf_object__find_program_by_title(const struct bpf_object *obj,
- const char *title)
+static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
{
- struct bpf_program *pos;
-
- bpf_object__for_each_program(pos, obj) {
- if (pos->sec_name && !strcmp(pos->sec_name, title))
- return pos;
- }
- return errno = ENOENT, NULL;
-}
-
-static bool prog_is_subprog(const struct bpf_object *obj,
- const struct bpf_program *prog)
-{
- /* For legacy reasons, libbpf supports an entry-point BPF programs
- * without SEC() attribute, i.e., those in the .text section. But if
- * there are 2 or more such programs in the .text section, they all
- * must be subprograms called from entry-point BPF programs in
- * designated SEC()'tions, otherwise there is no way to distinguish
- * which of those programs should be loaded vs which are a subprogram.
- * Similarly, if there is a function/program in .text and at least one
- * other BPF program with custom SEC() attribute, then we just assume
- * .text programs are subprograms (even if they are not called from
- * other programs), because libbpf never explicitly supported mixing
- * SEC()-designated BPF programs and .text entry-point BPF programs.
- *
- * In libbpf 1.0 strict mode, we always consider .text
- * programs to be subprograms.
- */
-
- if (libbpf_mode & LIBBPF_STRICT_SEC_NAME)
- return prog->sec_idx == obj->efile.text_shndx;
-
return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
}
@@ -4193,9 +4197,7 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
{
- struct bpf_map_def *def = &map->def;
- __u32 key_type_id = 0, value_type_id = 0;
- int ret;
+ int id;
if (!obj->btf)
return -ENOENT;
@@ -4204,31 +4206,22 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
* For struct_ops map, it does not need btf_key_type_id and
* btf_value_type_id.
*/
- if (map->sec_idx == obj->efile.btf_maps_shndx ||
- bpf_map__is_struct_ops(map))
+ if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
return 0;
- if (!bpf_map__is_internal(map)) {
- pr_warn("Use of BPF_ANNOTATE_KV_PAIR is deprecated, use BTF-defined maps in .maps section instead\n");
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
- def->value_size, &key_type_id,
- &value_type_id);
-#pragma GCC diagnostic pop
- } else {
- /*
- * LLVM annotates global data differently in BTF, that is,
- * only as '.data', '.bss' or '.rodata'.
- */
- ret = btf__find_by_name(obj->btf, map->real_name);
- }
- if (ret < 0)
- return ret;
+ /*
+ * LLVM annotates global data differently in BTF, that is,
+ * only as '.data', '.bss' or '.rodata'.
+ */
+ if (!bpf_map__is_internal(map))
+ return -ENOENT;
- map->btf_key_type_id = key_type_id;
- map->btf_value_type_id = bpf_map__is_internal(map) ?
- ret : value_type_id;
+ id = btf__find_by_name(obj->btf, map->real_name);
+ if (id < 0)
+ return id;
+
+ map->btf_key_type_id = 0;
+ map->btf_value_type_id = id;
return 0;
}
@@ -4268,10 +4261,24 @@ static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
return 0;
}
+bool bpf_map__autocreate(const struct bpf_map *map)
+{
+ return map->autocreate;
+}
+
+int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
+{
+ if (map->obj->loaded)
+ return libbpf_err(-EBUSY);
+
+ map->autocreate = autocreate;
+ return 0;
+}
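bpf_map__set_autocreate() must be called between open and load (hence the -EBUSY check above). A minimal userspace sketch, with hypothetical object and map names:

    #include <errno.h>
    #include <bpf/libbpf.h>

    /* sketch: opt out of creating one map before load;
     * "my_obj.bpf.o" and "huge_map" are hypothetical names
     */
    int load_without_map(void)
    {
            struct bpf_object *obj;
            struct bpf_map *map;
            int err;

            obj = bpf_object__open("my_obj.bpf.o");
            if (!obj)
                    return -errno;

            map = bpf_object__find_map_by_name(obj, "huge_map");
            if (map)
                    bpf_map__set_autocreate(map, false); /* before load only */

            err = bpf_object__load(obj);
            bpf_object__close(obj);
            return err;
    }

Any program that still references a non-autocreated map gets its map-load instructions poisoned at relocation time (see poison_map_ldimm64() below), so such code must be guarded or the program disabled via bpf_program__set_autoload().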
+
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
struct bpf_map_info info = {};
- __u32 len = sizeof(info);
+ __u32 len = sizeof(info), name_len;
int new_fd, err;
char *new_name;
@@ -4281,7 +4288,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
if (err)
return libbpf_err(err);
- new_name = strdup(info.name);
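+	/* the kernel truncates map names to BPF_OBJ_NAME_LEN - 1 bytes; if
+	 * the kernel-reported name is a truncated prefix of our full map
+	 * name, keep the original full name
+	 */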
+ name_len = strlen(info.name);
+ if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
+ new_name = strdup(map->name);
+ else
+ new_name = strdup(info.name);
+
if (!new_name)
return libbpf_err(-errno);
@@ -4340,18 +4352,16 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
{
- if (map->fd >= 0)
+ if (map->obj->loaded)
return libbpf_err(-EBUSY);
+
map->def.max_entries = max_entries;
- return 0;
-}
-int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
-{
- if (!map || !max_entries)
- return libbpf_err(-EINVAL);
+ /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
+ if (map->def.type == BPF_MAP_TYPE_RINGBUF)
+ map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
- return bpf_map__set_max_entries(map, max_entries);
+ return 0;
}
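adjust_ringbuf_sz() itself is not part of this hunk; the kernel requires BPF_MAP_TYPE_RINGBUF's max_entries to be a power-of-2 multiple of the page size, so a plausible sketch of the helper (not the exact in-tree implementation) is:

    #include <stdint.h>
    #include <unistd.h>

    /* sketch only, assuming the power-of-2-multiple-of-page-size rule */
    static size_t adjust_ringbuf_sz(size_t sz)
    {
            size_t page_sz = sysconf(_SC_PAGE_SIZE);
            size_t mul;

            if (sz == 0)
                    return page_sz; /* a sane non-zero minimum */

            for (mul = 1; mul <= SIZE_MAX / page_sz / 2; mul <<= 1)
                    if (mul * page_sz >= sz)
                            return mul * page_sz;

            return sz; /* too big to round up; let the kernel reject it */
    }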
static int
@@ -4587,7 +4597,7 @@ static int probe_kern_probe_read_kernel(void)
};
int fd, insn_cnt = ARRAY_SIZE(insns);
- fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
+ fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
return probe_fd(fd);
}
@@ -4678,6 +4688,31 @@ static int probe_perf_link(void)
return link_fd < 0 && err == -EBADF;
}
+static int probe_kern_bpf_cookie(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
+ BPF_EXIT_INSN(),
+ };
+ int ret, insn_cnt = ARRAY_SIZE(insns);
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf_enum64(void)
+{
+ static const char strs[] = "\0enum64";
+ __u32 types[] = {
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
+}
+
+static int probe_kern_syscall_wrapper(void);
+
enum kern_feature_result {
FEAT_UNKNOWN = 0,
FEAT_SUPPORTED = 1,
@@ -4740,6 +4775,15 @@ static struct kern_feature_desc {
[FEAT_MEMCG_ACCOUNT] = {
"memcg-based memory accounting", probe_memcg_account,
},
+ [FEAT_BPF_COOKIE] = {
+ "BPF cookie support", probe_kern_bpf_cookie,
+ },
+ [FEAT_BTF_ENUM64] = {
+ "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
+ },
+ [FEAT_SYSCALL_WRAPPER] = {
+ "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
+ },
};
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
@@ -4923,7 +4967,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
- case BPF_MAP_TYPE_RINGBUF:
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
@@ -5109,9 +5152,11 @@ bpf_object__create_maps(struct bpf_object *obj)
* bpf_object loading will succeed just fine even on old
* kernels.
*/
- if (bpf_map__is_internal(map) &&
- !kernel_supports(obj, FEAT_GLOBAL_DATA)) {
- map->skipped = true;
+ if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
+ map->autocreate = false;
+
+ if (!map->autocreate) {
+ pr_debug("map '%s': skipped auto-creating...\n", map->name);
continue;
}
@@ -5242,7 +5287,7 @@ int bpf_core_add_cands(struct bpf_core_cand *local_cand,
n = btf__type_cnt(targ_btf);
for (i = targ_start_id; i < n; i++) {
t = btf__type_by_id(targ_btf, i);
- if (btf_kind(t) != btf_kind(local_t))
+ if (!btf_kind_core_compat(t, local_t))
continue;
targ_name = btf__name_by_offset(targ_btf, t->name_off);
@@ -5450,76 +5495,13 @@ err_out:
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
const struct btf *targ_btf, __u32 targ_id)
{
- const struct btf_type *local_type, *targ_type;
- int depth = 32; /* max recursion depth */
-
- /* caller made sure that names match (ignoring flavor suffix) */
- local_type = btf__type_by_id(local_btf, local_id);
- targ_type = btf__type_by_id(targ_btf, targ_id);
- if (btf_kind(local_type) != btf_kind(targ_type))
- return 0;
-
-recur:
- depth--;
- if (depth < 0)
- return -EINVAL;
-
- local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
- targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
- if (!local_type || !targ_type)
- return -EINVAL;
-
- if (btf_kind(local_type) != btf_kind(targ_type))
- return 0;
-
- switch (btf_kind(local_type)) {
- case BTF_KIND_UNKN:
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION:
- case BTF_KIND_ENUM:
- case BTF_KIND_FWD:
- return 1;
- case BTF_KIND_INT:
- /* just reject deprecated bitfield-like integers; all other
- * integers are by default compatible between each other
- */
- return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
- case BTF_KIND_PTR:
- local_id = local_type->type;
- targ_id = targ_type->type;
- goto recur;
- case BTF_KIND_ARRAY:
- local_id = btf_array(local_type)->type;
- targ_id = btf_array(targ_type)->type;
- goto recur;
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *local_p = btf_params(local_type);
- struct btf_param *targ_p = btf_params(targ_type);
- __u16 local_vlen = btf_vlen(local_type);
- __u16 targ_vlen = btf_vlen(targ_type);
- int i, err;
-
- if (local_vlen != targ_vlen)
- return 0;
-
- for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
- skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
- skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
- err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
- if (err <= 0)
- return err;
- }
+ return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
+}
- /* tail recurse for return type check */
- skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
- skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
- goto recur;
- }
- default:
- pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
- btf_kind_str(local_type), local_id, targ_id);
- return 0;
- }
+int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id)
+{
+ return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
}
static size_t bpf_core_hash_fn(const void *key, void *ctx)
@@ -5555,6 +5537,22 @@ static int record_relo_core(struct bpf_program *prog,
return 0;
}
+static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
+{
+ struct reloc_desc *relo;
+ int i;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+ relo = &prog->reloc_desc[i];
+ if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
+ continue;
+
+ return relo->core_relo;
+ }
+
+ return NULL;
+}
+
static int bpf_core_resolve_relo(struct bpf_program *prog,
const struct bpf_core_relo *relo,
int relo_idx,
@@ -5611,7 +5609,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
struct bpf_program *prog;
struct bpf_insn *insn;
const char *sec_name;
- int i, err = 0, insn_idx, sec_idx;
+ int i, err = 0, insn_idx, sec_idx, sec_num;
if (obj->btf_ext->core_relo_info.len == 0)
return 0;
@@ -5632,32 +5630,18 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
}
seg = &obj->btf_ext->core_relo_info;
+ sec_num = 0;
for_each_btf_ext_sec(seg, sec) {
+ sec_idx = seg->sec_idxs[sec_num];
+ sec_num++;
+
sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
if (str_is_empty(sec_name)) {
err = -EINVAL;
goto out;
}
- /* bpf_object's ELF is gone by now so it's not easy to find
- * section index by section name, but we can find *any*
- * bpf_program within desired section name and use it's
- * prog->sec_idx to do a proper search by section index and
- * instruction offset
- */
- prog = NULL;
- for (i = 0; i < obj->nr_programs; i++) {
- prog = &obj->programs[i];
- if (strcmp(prog->sec_name, sec_name) == 0)
- break;
- }
- if (!prog) {
- pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
- return -ENOENT;
- }
- sec_idx = prog->sec_idx;
- pr_debug("sec '%s': found %d CO-RE relocations\n",
- sec_name, sec->num_info);
+ pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
if (rec->insn_off % BPF_INSN_SZ)
@@ -5665,15 +5649,22 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
insn_idx = rec->insn_off / BPF_INSN_SZ;
prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
if (!prog) {
- pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
- sec_name, insn_idx, i);
- err = -EINVAL;
- goto out;
+ /* When a __weak subprog is "overridden" by another instance
+ * of the subprog from a different object file, the linker still
+ * appends all the .BTF.ext info that used to belong to the
+ * eliminated subprogram.
+ * This is similar to what the x86-64 linker does for relocations.
+ * So ignore such relocations, just as we ignore subprog
+ * instructions when discovering subprograms.
+ */
+ pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
+ sec_name, i, insn_idx);
+ continue;
}
/* no need to apply CO-RE relocation if the program is
* not going to be loaded
*/
- if (!prog->load)
+ if (!prog->autoload)
continue;
/* adjust insn_idx from section frame of reference to the local
@@ -5685,16 +5676,16 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
return -EINVAL;
insn = &prog->insns[insn_idx];
- if (prog->obj->gen_loader) {
- err = record_relo_core(prog, rec, insn_idx);
- if (err) {
- pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
- prog->name, i, err);
- goto out;
- }
- continue;
+ err = record_relo_core(prog, rec, insn_idx);
+ if (err) {
+ pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
+ prog->name, i, err);
+ goto out;
}
+ if (prog->obj->gen_loader)
+ continue;
+
err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
@@ -5725,6 +5716,36 @@ out:
return err;
}
+/* base map load ldimm64 special constant, used also for log fixup logic */
+#define MAP_LDIMM64_POISON_BASE 2001000000
+#define MAP_LDIMM64_POISON_PFX "200100"
+
+static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
+ int insn_idx, struct bpf_insn *insn,
+ int map_idx, const struct bpf_map *map)
+{
+ int i;
+
+ pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
+ prog->name, relo_idx, insn_idx, map_idx, map->name);
+
+ /* we turn single ldimm64 into two identical invalid calls */
+ for (i = 0; i < 2; i++) {
+ insn->code = BPF_JMP | BPF_CALL;
+ insn->dst_reg = 0;
+ insn->src_reg = 0;
+ insn->off = 0;
+ /* if this instruction is reachable (not a dead code),
+ * verifier will complain with something like:
+ * invalid func unknown#2001000123
+ * where lower 123 is map index into obj->maps[] array
+ */
+ insn->imm = MAP_LDIMM64_POISON_BASE + map_idx;
+
+ insn++;
+ }
+}
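The loop writes two instructions because ldimm64 (BPF_LD | BPF_DW | BPF_IMM) is the one double-wide BPF instruction, occupying two consecutive struct bpf_insn slots; compare the shape of the BPF_LD_IMM64_RAW() helper in the kernel's filter.h:

    /* a 64-bit immediate load is encoded as two consecutive insns,
     * the second carrying the upper 32 bits of the immediate
     */
    #define BPF_LD_IMM64_RAW(DST, SRC, IMM)                         \
            ((struct bpf_insn) {                                    \
                    .code  = BPF_LD | BPF_DW | BPF_IMM,             \
                    .dst_reg = DST,                                 \
                    .src_reg = SRC,                                 \
                    .off   = 0,                                     \
                    .imm   = (__u32) (IMM) }),                      \
            ((struct bpf_insn) {                                    \
                    .code  = 0, /* zero is reserved opcode */       \
                    .dst_reg = 0,                                   \
                    .src_reg = 0,                                   \
                    .off   = 0,                                     \
                    .imm   = ((__u64) (IMM)) >> 32 })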
+
/* Relocate data references within program code:
* - map references;
* - global variable references;
@@ -5738,33 +5759,35 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
for (i = 0; i < prog->nr_reloc; i++) {
struct reloc_desc *relo = &prog->reloc_desc[i];
struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+ const struct bpf_map *map;
struct extern_desc *ext;
switch (relo->type) {
case RELO_LD64:
+ map = &obj->maps[relo->map_idx];
if (obj->gen_loader) {
insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
insn[0].imm = relo->map_idx;
- } else {
+ } else if (map->autocreate) {
insn[0].src_reg = BPF_PSEUDO_MAP_FD;
- insn[0].imm = obj->maps[relo->map_idx].fd;
+ insn[0].imm = map->fd;
+ } else {
+ poison_map_ldimm64(prog, i, relo->insn_idx, insn,
+ relo->map_idx, map);
}
break;
case RELO_DATA:
+ map = &obj->maps[relo->map_idx];
insn[1].imm = insn[0].imm + relo->sym_off;
if (obj->gen_loader) {
insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
insn[0].imm = relo->map_idx;
- } else {
- const struct bpf_map *map = &obj->maps[relo->map_idx];
-
- if (map->skipped) {
- pr_warn("prog '%s': relo #%d: kernel doesn't support global data\n",
- prog->name, i);
- return -ENOTSUP;
- }
+ } else if (map->autocreate) {
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
- insn[0].imm = obj->maps[relo->map_idx].fd;
+ insn[0].imm = map->fd;
+ } else {
+ poison_map_ldimm64(prog, i, relo->insn_idx, insn,
+ relo->map_idx, map);
}
break;
case RELO_EXTERN_VAR:
@@ -5834,14 +5857,13 @@ static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
void *rec, *rec_end, *new_prog_info;
const struct btf_ext_info_sec *sec;
size_t old_sz, new_sz;
- const char *sec_name;
- int i, off_adj;
+ int i, sec_num, sec_idx, off_adj;
+ sec_num = 0;
for_each_btf_ext_sec(ext_info, sec) {
- sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
- if (!sec_name)
- return -EINVAL;
- if (strcmp(sec_name, prog->sec_name) != 0)
+ sec_idx = ext_info->sec_idxs[sec_num];
+ sec_num++;
+ if (prog->sec_idx != sec_idx)
continue;
for_each_btf_ext_rec(ext_info, sec, i, rec) {
@@ -6236,7 +6258,6 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
if (err)
return err;
-
return 0;
}
@@ -6297,8 +6318,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
err);
return err;
}
- if (obj->gen_loader)
- bpf_object__sort_relos(obj);
+ bpf_object__sort_relos(obj);
}
/* Before relocating calls pre-process relocations and mark
@@ -6334,7 +6354,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
*/
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load)
+ if (!prog->autoload)
continue;
err = bpf_object__relocate_calls(obj, prog);
@@ -6349,7 +6369,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load)
+ if (!prog->autoload)
continue;
err = bpf_object__relocate_data(obj, prog);
if (err) {
@@ -6358,8 +6378,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- if (!obj->gen_loader)
- bpf_object__free_relocs(obj);
+
return 0;
}
@@ -6606,17 +6625,27 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
- if (def & SEC_DEPRECATED)
- pr_warn("SEC(\"%s\") is deprecated, please see https://github.com/libbpf/libbpf/wiki/Libbpf-1.0-migration-guide#bpf-program-sec-annotation-deprecations for details\n",
- prog->sec_name);
-
- if ((prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_LSM ||
- prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
+ if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
int btf_obj_fd = 0, btf_type_id = 0, err;
const char *attach_name;
- attach_name = strchr(prog->sec_name, '/') + 1;
+ attach_name = strchr(prog->sec_name, '/');
+ if (!attach_name) {
+ /* if BPF program is annotated with just SEC("fentry")
+ * (or similar) without declaratively specifying
+ * target, then it is expected that target will be
+ * specified with bpf_program__set_attach_target() at
+ * runtime before BPF object load step. If not, then
+ * there is nothing to load into the kernel as BPF
+ * verifier won't be able to validate BPF program
+ * correctness anyways.
+ */
+ pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
+ prog->name);
+ return -EINVAL;
+ }
+ attach_name++; /* skip over / */
+
err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
if (err)
return err;
@@ -6636,10 +6665,11 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
return 0;
}
-static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
- struct bpf_insn *insns, int insns_cnt,
- const char *license, __u32 kern_version,
- int *prog_fd)
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
+
+static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
+ struct bpf_insn *insns, int insns_cnt,
+ const char *license, __u32 kern_version, int *prog_fd)
{
LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
const char *prog_name = NULL;
@@ -6695,6 +6725,8 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
prog->name, err);
return err;
}
+ insns = prog->insns;
+ insns_cnt = prog->insns_cnt;
}
if (obj->gen_loader) {
@@ -6706,7 +6738,7 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog
}
retry_load:
- /* if log_level is zero, we don't request logs initiallly even if
+ /* if log_level is zero, we don't request logs initially even if
* custom log_buf is specified; if the program load fails, then we'll
* bump log_level to 1 and use either custom log_buf or we'll allocate
* our own and retry the load to get details on what failed
@@ -6782,6 +6814,10 @@ retry_load:
goto retry_load;
ret = -errno;
+
+ /* post-process verifier log to improve error descriptions */
+ fixup_verifier_log(prog, log_buf, log_buf_size);
+
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
pr_perm_msg(ret);
@@ -6790,10 +6826,6 @@ retry_load:
pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
prog->name, log_buf);
}
- if (insns_cnt >= BPF_MAXINSNS) {
- pr_warn("prog '%s': program too large (%d insns), at most %d insns\n",
- prog->name, insns_cnt, BPF_MAXINSNS);
- }
out:
if (own_log_buf)
@@ -6801,6 +6833,169 @@ out:
return ret;
}
+static char *find_prev_line(char *buf, char *cur)
+{
+ char *p;
+
+ if (cur == buf) /* reached the start of the log buf */
+ return NULL;
+
+ p = cur - 1;
+ while (p - 1 >= buf && *(p - 1) != '\n')
+ p--;
+
+ return p;
+}
+
+static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
+ char *orig, size_t orig_sz, const char *patch)
+{
+ /* size of the remaining log content to the right of the to-be-replaced part */
+ size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
+ size_t patch_sz = strlen(patch);
+
+ if (patch_sz != orig_sz) {
+ /* If patch line(s) are longer than original piece of verifier log,
+ * shift log contents by (patch_sz - orig_sz) bytes to the right
+ * starting from after to-be-replaced part of the log.
+ *
+ * If patch line(s) are shorter than original piece of verifier log,
+ * shift log contents by (orig_sz - patch_sz) bytes to the left
+ * starting from after to-be-replaced part of the log
+ *
+ * We need to be careful about not overflowing available
+ * buf_sz capacity. If that's the case, we'll truncate the end
+ * of the original log, as necessary.
+ */
+ if (patch_sz > orig_sz) {
+ if (orig + patch_sz >= buf + buf_sz) {
+ /* patch is big enough to cover remaining space completely */
+ patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
+ rem_sz = 0;
+ } else if (patch_sz - orig_sz > buf_sz - log_sz) {
+ /* patch causes part of remaining log to be truncated */
+ rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
+ }
+ }
+ /* shift remaining log to the right by calculated amount */
+ memmove(orig + patch_sz, orig + orig_sz, rem_sz);
+ }
+
+ memcpy(orig, patch, patch_sz);
+}
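An illustrative call, assuming patch_log() as above (buffer contents made up): replacing one log line with a longer patch shifts the tail right, truncating only if buf_sz would be exceeded.

    char buf[64] = "line A\nBAD\nline C\n";
    size_t log_sz = strlen(buf) + 1;
    char *bad = strstr(buf, "BAD\n");

    patch_log(buf, sizeof(buf), log_sz, bad, strlen("BAD\n"),
              "a more informative line\n");
    /* buf now reads: "line A\na more informative line\nline C\n" */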
+
+static void fixup_log_failed_core_relo(struct bpf_program *prog,
+ char *buf, size_t buf_sz, size_t log_sz,
+ char *line1, char *line2, char *line3)
+{
+ /* Expected log for failed and not properly guarded CO-RE relocation:
+ * line1 -> 123: (85) call unknown#195896080
+ * line2 -> invalid func unknown#195896080
+ * line3 -> <anything else or end of buffer>
+ *
+ * "123" is the index of the instruction that was poisoned. We extract
+ * instruction index to find corresponding CO-RE relocation and
+ * replace this part of the log with more relevant information about
+ * failed CO-RE relocation.
+ */
+ const struct bpf_core_relo *relo;
+ struct bpf_core_spec spec;
+ char patch[512], spec_buf[256];
+ int insn_idx, err, spec_len;
+
+ if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
+ return;
+
+ relo = find_relo_core(prog, insn_idx);
+ if (!relo)
+ return;
+
+ err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
+ if (err)
+ return;
+
+ spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
+ snprintf(patch, sizeof(patch),
+ "%d: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation %s%s\n",
+ insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
+
+ patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
+}
+
+static void fixup_log_missing_map_load(struct bpf_program *prog,
+ char *buf, size_t buf_sz, size_t log_sz,
+ char *line1, char *line2, char *line3)
+{
+ /* Expected log for failed and not properly guarded map reference:
+ * line1 -> 123: (85) call unknown#2001000345
+ * line2 -> invalid func unknown#2001000345
+ * line3 -> <anything else or end of buffer>
+ *
+ * "123" is the index of the instruction that was poisoned.
+ * "345" in "2001000345" are map index in obj->maps to fetch map name.
+ */
+ struct bpf_object *obj = prog->obj;
+ const struct bpf_map *map;
+ int insn_idx, map_idx;
+ char patch[128];
+
+ if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
+ return;
+
+ map_idx -= MAP_LDIMM64_POISON_BASE;
+ if (map_idx < 0 || map_idx >= obj->nr_maps)
+ return;
+ map = &obj->maps[map_idx];
+
+ snprintf(patch, sizeof(patch),
+ "%d: <invalid BPF map reference>\n"
+ "BPF map '%s' is referenced but wasn't created\n",
+ insn_idx, map->name);
+
+ patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
+}
+
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
+{
+ /* look for familiar error patterns in last N lines of the log */
+ const size_t max_last_line_cnt = 10;
+ char *prev_line, *cur_line, *next_line;
+ size_t log_sz;
+ int i;
+
+ if (!buf)
+ return;
+
+ log_sz = strlen(buf) + 1;
+ next_line = buf + log_sz - 1;
+
+ for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
+ cur_line = find_prev_line(buf, next_line);
+ if (!cur_line)
+ return;
+
+ /* failed CO-RE relocation case */
+ if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
+ prev_line = find_prev_line(buf, cur_line);
+ if (!prev_line)
+ continue;
+
+ fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
+ prev_line, cur_line, next_line);
+ return;
+ } else if (str_has_pfx(cur_line, "invalid func unknown#"MAP_LDIMM64_POISON_PFX)) {
+ prev_line = find_prev_line(buf, cur_line);
+ if (!prev_line)
+ continue;
+
+ fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
+ prev_line, cur_line, next_line);
+ return;
+ }
+ }
+}
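Putting the pieces together, the net effect on a verifier log for an unguarded reference to a non-autocreated map looks roughly like this (instruction index, map index, and map name are made up):

    before:  123: (85) call unknown#2001000005
             invalid func unknown#2001000005

    after:   123: <invalid BPF map reference>
             BPF map 'my_map' is referenced but wasn't created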
+
static int bpf_program_record_relos(struct bpf_program *prog)
{
struct bpf_object *obj = prog->obj;
@@ -6841,93 +7036,6 @@ static int bpf_program_record_relos(struct bpf_program *prog)
return 0;
}
-static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
- const char *license, __u32 kern_ver)
-{
- int err = 0, fd, i;
-
- if (obj->loaded) {
- pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
- return libbpf_err(-EINVAL);
- }
-
- if (prog->instances.nr < 0 || !prog->instances.fds) {
- if (prog->preprocessor) {
- pr_warn("Internal error: can't load program '%s'\n",
- prog->name);
- return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
- }
-
- prog->instances.fds = malloc(sizeof(int));
- if (!prog->instances.fds) {
- pr_warn("Not enough memory for BPF fds\n");
- return libbpf_err(-ENOMEM);
- }
- prog->instances.nr = 1;
- prog->instances.fds[0] = -1;
- }
-
- if (!prog->preprocessor) {
- if (prog->instances.nr != 1) {
- pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
- prog->name, prog->instances.nr);
- }
- if (obj->gen_loader)
- bpf_program_record_relos(prog);
- err = bpf_object_load_prog_instance(obj, prog,
- prog->insns, prog->insns_cnt,
- license, kern_ver, &fd);
- if (!err)
- prog->instances.fds[0] = fd;
- goto out;
- }
-
- for (i = 0; i < prog->instances.nr; i++) {
- struct bpf_prog_prep_result result;
- bpf_program_prep_t preprocessor = prog->preprocessor;
-
- memset(&result, 0, sizeof(result));
- err = preprocessor(prog, i, prog->insns,
- prog->insns_cnt, &result);
- if (err) {
- pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
- i, prog->name);
- goto out;
- }
-
- if (!result.new_insn_ptr || !result.new_insn_cnt) {
- pr_debug("Skip loading the %dth instance of program '%s'\n",
- i, prog->name);
- prog->instances.fds[i] = -1;
- if (result.pfd)
- *result.pfd = -1;
- continue;
- }
-
- err = bpf_object_load_prog_instance(obj, prog,
- result.new_insn_ptr, result.new_insn_cnt,
- license, kern_ver, &fd);
- if (err) {
- pr_warn("Loading the %dth instance of program '%s' failed\n",
- i, prog->name);
- goto out;
- }
-
- if (result.pfd)
- *result.pfd = fd;
- prog->instances.fds[i] = fd;
- }
-out:
- if (err)
- pr_warn("failed to load program '%s'\n", prog->name);
- return libbpf_err(err);
-}
-
-int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver)
-{
- return bpf_object_load_prog(prog->obj, prog, license, kern_ver);
-}
-
static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
@@ -6946,17 +7054,24 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load) {
+ if (!prog->autoload) {
pr_debug("prog '%s': skipped loading\n", prog->name);
continue;
}
prog->log_level |= log_level;
- err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version);
- if (err)
+
+ if (obj->gen_loader)
+ bpf_program_record_relos(prog);
+
+ err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
+ obj->license, obj->kern_version, &prog->fd);
+ if (err) {
+ pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
return err;
+ }
}
- if (obj->gen_loader)
- bpf_object__free_relocs(obj);
+
+ bpf_object__free_relocs(obj);
return 0;
}
@@ -6976,15 +7091,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
continue;
}
- bpf_program__set_type(prog, prog->sec_def->prog_type);
- bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
- if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
- prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
- prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
-#pragma GCC diagnostic pop
+ prog->type = prog->sec_def->prog_type;
+ prog->expected_attach_type = prog->sec_def->expected_attach_type;
/* sec_def can have custom callback which should be called
* after bpf_program is initialized to adjust its properties
@@ -7091,36 +7199,6 @@ out:
return ERR_PTR(err);
}
-static struct bpf_object *
-__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
-{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .relaxed_maps = flags & MAPS_RELAX_COMPAT,
- );
-
- /* param validation */
- if (!attr->file)
- return NULL;
-
- pr_debug("loading %s\n", attr->file);
- return bpf_object_open(attr->file, NULL, 0, &opts);
-}
-
-struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
-{
- return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
-}
-
-struct bpf_object *bpf_object__open(const char *path)
-{
- struct bpf_object_open_attr attr = {
- .file = path,
- .prog_type = BPF_PROG_TYPE_UNSPEC,
- };
-
- return libbpf_ptr(__bpf_object__open_xattr(&attr, 0));
-}
-
struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
{
@@ -7132,6 +7210,11 @@ bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
}
+struct bpf_object *bpf_object__open(const char *path)
+{
+ return bpf_object__open_file(path, NULL);
+}
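bpf_object__open() is now just bpf_object__open_file() with NULL opts; callers that previously relied on the removed _xattr/_buffer variants can pass options explicitly instead. A sketch (object name hypothetical, includes omitted):

    LIBBPF_OPTS(bpf_object_open_opts, opts,
            .object_name = "my_obj",   /* hypothetical */
            .relaxed_maps = true,
    );
    struct bpf_object *obj;

    obj = bpf_object__open_file("my_obj.bpf.o", &opts);
    if (!obj)
            fprintf(stderr, "open failed: %d\n", -errno); /* NULL + errno convention */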
+
struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
@@ -7142,23 +7225,6 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
}
-struct bpf_object *
-bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
- const char *name)
-{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .object_name = name,
- /* wrong default, but backwards-compatible */
- .relaxed_maps = true,
- );
-
- /* returning NULL is wrong, but backwards-compatible */
- if (!obj_buf || obj_buf_sz == 0)
- return errno = EINVAL, NULL;
-
- return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, &opts));
-}
-
static int bpf_object_unload(struct bpf_object *obj)
{
size_t i;
@@ -7244,14 +7310,14 @@ static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
return 0;
if (ext->is_set && ext->ksym.addr != sym_addr) {
- pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
+ pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
sym_name, ext->ksym.addr, sym_addr);
return -EINVAL;
}
if (!ext->is_set) {
ext->is_set = true;
ext->ksym.addr = sym_addr;
- pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
+ pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
}
return 0;
}
@@ -7455,28 +7521,52 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
for (i = 0; i < obj->nr_extern; i++) {
ext = &obj->externs[i];
- if (ext->type == EXT_KCFG &&
- strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
- void *ext_val = kcfg_data + ext->kcfg.data_off;
- __u32 kver = get_kernel_version();
+ if (ext->type == EXT_KSYM) {
+ if (ext->ksym.type_id)
+ need_vmlinux_btf = true;
+ else
+ need_kallsyms = true;
+ continue;
+ } else if (ext->type == EXT_KCFG) {
+ void *ext_ptr = kcfg_data + ext->kcfg.data_off;
+ __u64 value = 0;
+
+ /* Kconfig externs need actual /proc/config.gz */
+ if (str_has_pfx(ext->name, "CONFIG_")) {
+ need_config = true;
+ continue;
+ }
- if (!kver) {
- pr_warn("failed to get kernel version\n");
+ /* Virtual kcfg externs are handled specially by libbpf */
+ if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
+ value = get_kernel_version();
+ if (!value) {
+ pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
+ return -EINVAL;
+ }
+ } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
+ value = kernel_supports(obj, FEAT_BPF_COOKIE);
+ } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
+ value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
+ } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
+ * Currently libbpf supports only CONFIG_- and LINUX_-prefixed
+ * __kconfig externs, where the LINUX_ ones are virtual and filled in
+ * by libbpf itself (their values don't come from Kconfig).
+ * If a LINUX_xxx variable is not recognized by libbpf but is marked
+ * __weak, it defaults to zero, just like CONFIG_xxx externs.
+ */
+ pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
return -EINVAL;
}
- err = set_kcfg_value_num(ext, ext_val, kver);
+
+ err = set_kcfg_value_num(ext, ext_ptr, value);
if (err)
return err;
- pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
- } else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) {
- need_config = true;
- } else if (ext->type == EXT_KSYM) {
- if (ext->ksym.type_id)
- need_vmlinux_btf = true;
- else
- need_kallsyms = true;
+ pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
+ ext->name, (long long)value);
} else {
- pr_warn("unrecognized extern '%s'\n", ext->name);
+ pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
return -EINVAL;
}
}
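On the BPF side, the __kconfig externs this logic resolves look like the following sketch (attach point hypothetical; __kconfig and __weak come from bpf_helpers.h):

    #include "vmlinux.h"               /* or equivalent type headers */
    #include <bpf/bpf_helpers.h>

    extern unsigned int LINUX_KERNEL_VERSION __kconfig;          /* virtual */
    extern unsigned int LINUX_HAS_BPF_COOKIE __kconfig __weak;   /* virtual */
    extern unsigned int CONFIG_HZ __kconfig __weak;              /* from Kconfig */

    SEC("tracepoint/syscalls/sys_enter_openat")  /* hypothetical program */
    int handle_open(void *ctx)
    {
            if (!LINUX_HAS_BPF_COOKIE)
                    return 0;  /* graceful fallback on older kernels */
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";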
@@ -7512,10 +7602,10 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
ext = &obj->externs[i];
if (!ext->is_set && !ext->is_weak) {
- pr_warn("extern %s (strong) not resolved\n", ext->name);
+ pr_warn("extern '%s' (strong): not resolved\n", ext->name);
return -ESRCH;
} else if (!ext->is_set) {
- pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
+ pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
ext->name);
}
}
@@ -7591,11 +7681,6 @@ out:
return libbpf_err(err);
}
-int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
-{
- return bpf_object_load(attr->obj, attr->log_level, attr->target_btf_path);
-}
-
int bpf_object__load(struct bpf_object *obj)
{
return bpf_object_load(obj, 0, NULL);
@@ -7653,11 +7738,16 @@ static int check_path(const char *path)
return err;
}
-static int bpf_program_pin_instance(struct bpf_program *prog, const char *path, int instance)
+int bpf_program__pin(struct bpf_program *prog, const char *path)
{
char *cp, errmsg[STRERR_BUFSIZE];
int err;
+ if (prog->fd < 0) {
+ pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
+ return libbpf_err(-EINVAL);
+ }
+
err = make_parent_dir(path);
if (err)
return libbpf_err(err);
@@ -7666,170 +7756,35 @@ static int bpf_program_pin_instance(struct bpf_program *prog, const char *path,
if (err)
return libbpf_err(err);
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return libbpf_err(-EINVAL);
- }
-
- if (instance < 0 || instance >= prog->instances.nr) {
- pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->name, prog->instances.nr);
- return libbpf_err(-EINVAL);
- }
-
- if (bpf_obj_pin(prog->instances.fds[instance], path)) {
+ if (bpf_obj_pin(prog->fd, path)) {
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("failed to pin program: %s\n", cp);
+ pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
return libbpf_err(err);
}
- pr_debug("pinned program '%s'\n", path);
+ pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
return 0;
}
-static int bpf_program_unpin_instance(struct bpf_program *prog, const char *path, int instance)
+int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
int err;
- err = check_path(path);
- if (err)
- return libbpf_err(err);
-
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return libbpf_err(-EINVAL);
- }
-
- if (instance < 0 || instance >= prog->instances.nr) {
- pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->name, prog->instances.nr);
- return libbpf_err(-EINVAL);
- }
-
- err = unlink(path);
- if (err != 0)
- return libbpf_err(-errno);
-
- pr_debug("unpinned program '%s'\n", path);
-
- return 0;
-}
-
-__attribute__((alias("bpf_program_pin_instance")))
-int bpf_object__pin_instance(struct bpf_program *prog, const char *path, int instance);
-
-__attribute__((alias("bpf_program_unpin_instance")))
-int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, int instance);
-
-int bpf_program__pin(struct bpf_program *prog, const char *path)
-{
- int i, err;
-
- err = make_parent_dir(path);
- if (err)
- return libbpf_err(err);
-
- err = check_path(path);
- if (err)
- return libbpf_err(err);
-
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return libbpf_err(-EINVAL);
- }
-
- if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n", prog->name);
+ if (prog->fd < 0) {
+ pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
return libbpf_err(-EINVAL);
}
- if (prog->instances.nr == 1) {
- /* don't create subdirs when pinning single instance */
- return bpf_program_pin_instance(prog, path, 0);
- }
-
- for (i = 0; i < prog->instances.nr; i++) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
- goto err_unpin;
- }
-
- err = bpf_program_pin_instance(prog, buf, i);
- if (err)
- goto err_unpin;
- }
-
- return 0;
-
-err_unpin:
- for (i = i - 1; i >= 0; i--) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
- if (len < 0)
- continue;
- else if (len >= PATH_MAX)
- continue;
-
- bpf_program_unpin_instance(prog, buf, i);
- }
-
- rmdir(path);
-
- return libbpf_err(err);
-}
-
-int bpf_program__unpin(struct bpf_program *prog, const char *path)
-{
- int i, err;
-
err = check_path(path);
if (err)
return libbpf_err(err);
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return libbpf_err(-EINVAL);
- }
-
- if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n", prog->name);
- return libbpf_err(-EINVAL);
- }
-
- if (prog->instances.nr == 1) {
- /* don't create subdirs when pinning single instance */
- return bpf_program_unpin_instance(prog, path, 0);
- }
-
- for (i = 0; i < prog->instances.nr; i++) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
- if (len < 0)
- return libbpf_err(-EINVAL);
- else if (len >= PATH_MAX)
- return libbpf_err(-ENAMETOOLONG);
-
- err = bpf_program_unpin_instance(prog, buf, i);
- if (err)
- return err;
- }
-
- err = rmdir(path);
+ err = unlink(path);
if (err)
return libbpf_err(-errno);
+ pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
return 0;
}
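With per-program instances gone, a program pins to exactly one bpffs path. A sketch (path and program name hypothetical; the object must already be loaded so prog->fd is valid):

    struct bpf_program *prog;
    int err;

    prog = bpf_object__find_program_by_name(obj, "handle_open");
    err = bpf_program__pin(prog, "/sys/fs/bpf/handle_open");
    /* ... later ... */
    err = bpf_program__unpin(prog, "/sys/fs/bpf/handle_open");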
@@ -7985,7 +7940,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
char *pin_path = NULL;
char buf[PATH_MAX];
- if (map->skipped)
+ if (!map->autocreate)
continue;
if (path) {
@@ -8076,8 +8031,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
char buf[PATH_MAX];
int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- prog->pin_name);
+ len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->name);
if (len < 0) {
err = -EINVAL;
goto err_unpin_programs;
@@ -8098,8 +8052,7 @@ err_unpin_programs:
char buf[PATH_MAX];
int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- prog->pin_name);
+ len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->name);
if (len < 0)
continue;
else if (len >= PATH_MAX)
@@ -8123,8 +8076,7 @@ int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
char buf[PATH_MAX];
int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- prog->pin_name);
+ len = snprintf(buf, PATH_MAX, "%s/%s", path, prog->name);
if (len < 0)
return libbpf_err(-EINVAL);
else if (len >= PATH_MAX)
@@ -8157,11 +8109,6 @@ int bpf_object__pin(struct bpf_object *obj, const char *path)
static void bpf_map__destroy(struct bpf_map *map)
{
- if (map->clear_priv)
- map->clear_priv(map, map->priv);
- map->priv = NULL;
- map->clear_priv = NULL;
-
if (map->inner_map) {
bpf_map__destroy(map->inner_map);
zfree(&map->inner_map);
@@ -8197,8 +8144,8 @@ void bpf_object__close(struct bpf_object *obj)
if (IS_ERR_OR_NULL(obj))
return;
- if (obj->clear_priv)
- obj->clear_priv(obj, obj->priv);
+ usdt_manager_free(obj->usdt_man);
+ obj->usdt_man = NULL;
bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
@@ -8223,33 +8170,9 @@ void bpf_object__close(struct bpf_object *obj)
}
zfree(&obj->programs);
- list_del(&obj->list);
free(obj);
}
-struct bpf_object *
-bpf_object__next(struct bpf_object *prev)
-{
- struct bpf_object *next;
- bool strict = (libbpf_mode & LIBBPF_STRICT_NO_OBJECT_LIST);
-
- if (strict)
- return NULL;
-
- if (!prev)
- next = list_first_entry(&bpf_objects_list,
- struct bpf_object,
- list);
- else
- next = list_next_entry(prev, list);
-
- /* Empty list is noticed here so don't need checking on entry. */
- if (&next->list == &bpf_objects_list)
- return NULL;
-
- return next;
-}
-
const char *bpf_object__name(const struct bpf_object *obj)
{
return obj ? obj->name : libbpf_err_ptr(-EINVAL);
@@ -8280,22 +8203,6 @@ int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
return 0;
}
-int bpf_object__set_priv(struct bpf_object *obj, void *priv,
- bpf_object_clear_priv_t clear_priv)
-{
- if (obj->priv && obj->clear_priv)
- obj->clear_priv(obj, obj->priv);
-
- obj->priv = priv;
- obj->clear_priv = clear_priv;
- return 0;
-}
-
-void *bpf_object__priv(const struct bpf_object *obj)
-{
- return obj ? obj->priv : libbpf_err_ptr(-EINVAL);
-}
-
int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
{
struct bpf_gen *gen;
@@ -8339,12 +8246,6 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
}
struct bpf_program *
-bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
-{
- return bpf_object__next_program(obj, prev);
-}
-
-struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
{
struct bpf_program *prog = prev;
@@ -8357,12 +8258,6 @@ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
}
struct bpf_program *
-bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
-{
- return bpf_object__prev_program(obj, next);
-}
-
-struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
{
struct bpf_program *prog = next;
@@ -8374,22 +8269,6 @@ bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
return prog;
}
-int bpf_program__set_priv(struct bpf_program *prog, void *priv,
- bpf_program_clear_priv_t clear_priv)
-{
- if (prog->priv && prog->clear_priv)
- prog->clear_priv(prog, prog->priv);
-
- prog->priv = priv;
- prog->clear_priv = clear_priv;
- return 0;
-}
-
-void *bpf_program__priv(const struct bpf_program *prog)
-{
- return prog ? prog->priv : libbpf_err_ptr(-EINVAL);
-}
-
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
prog->prog_ifindex = ifindex;
@@ -8405,25 +8284,9 @@ const char *bpf_program__section_name(const struct bpf_program *prog)
return prog->sec_name;
}
-const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
-{
- const char *title;
-
- title = prog->sec_name;
- if (needs_copy) {
- title = strdup(title);
- if (!title) {
- pr_warn("failed to strdup program title\n");
- return libbpf_err_ptr(-ENOMEM);
- }
- }
-
- return title;
-}
-
bool bpf_program__autoload(const struct bpf_program *prog)
{
- return prog->load;
+ return prog->autoload;
}
int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
@@ -8431,22 +8294,10 @@ int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
if (prog->obj->loaded)
return libbpf_err(-EINVAL);
- prog->load = autoload;
+ prog->autoload = autoload;
return 0;
}
-static int bpf_program_nth_fd(const struct bpf_program *prog, int n);
-
-int bpf_program__fd(const struct bpf_program *prog)
-{
- return bpf_program_nth_fd(prog, 0);
-}
-
-size_t bpf_program__size(const struct bpf_program *prog)
-{
- return prog->insns_cnt * BPF_INSN_SZ;
-}
-
const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
{
return prog->insns;
@@ -8457,58 +8308,35 @@ size_t bpf_program__insn_cnt(const struct bpf_program *prog)
return prog->insns_cnt;
}
-int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
- bpf_program_prep_t prep)
+int bpf_program__set_insns(struct bpf_program *prog,
+ struct bpf_insn *new_insns, size_t new_insn_cnt)
{
- int *instances_fds;
-
- if (nr_instances <= 0 || !prep)
- return libbpf_err(-EINVAL);
+ struct bpf_insn *insns;
- if (prog->instances.nr > 0 || prog->instances.fds) {
- pr_warn("Can't set pre-processor after loading\n");
- return libbpf_err(-EINVAL);
- }
+ if (prog->obj->loaded)
+ return -EBUSY;
- instances_fds = malloc(sizeof(int) * nr_instances);
- if (!instances_fds) {
- pr_warn("alloc memory failed for fds\n");
- return libbpf_err(-ENOMEM);
+ insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
+ if (!insns) {
+ pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
+ return -ENOMEM;
}
+ memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
- /* fill all fd with -1 */
- memset(instances_fds, -1, sizeof(int) * nr_instances);
-
- prog->instances.nr = nr_instances;
- prog->instances.fds = instances_fds;
- prog->preprocessor = prep;
+ prog->insns = insns;
+ prog->insns_cnt = new_insn_cnt;
return 0;
}
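bpf_program__set_insns() is an advanced, use-at-your-own-risk API: it swaps the program's entire instruction stream between open and load, and the replacement must be a valid BPF program in its own right. A minimal sketch:

    /* replace the program body with "return 0" before load */
    struct bpf_insn trivial[] = {
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
    };
    int err = bpf_program__set_insns(prog, trivial, ARRAY_SIZE(trivial));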
-__attribute__((alias("bpf_program_nth_fd")))
-int bpf_program__nth_fd(const struct bpf_program *prog, int n);
-
-static int bpf_program_nth_fd(const struct bpf_program *prog, int n)
+int bpf_program__fd(const struct bpf_program *prog)
{
- int fd;
-
if (!prog)
return libbpf_err(-EINVAL);
- if (n >= prog->instances.nr || n < 0) {
- pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
- n, prog->name, prog->instances.nr);
- return libbpf_err(-EINVAL);
- }
-
- fd = prog->instances.fds[n];
- if (fd < 0) {
- pr_warn("%dth instance of program '%s' is invalid\n",
- n, prog->name);
+ if (prog->fd < 0)
return libbpf_err(-ENOENT);
- }
- return fd;
+ return prog->fd;
}
__alias(bpf_program__type)
@@ -8519,45 +8347,15 @@ enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
return prog->type;
}
-void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
+int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->type = type;
+ return 0;
}
-static bool bpf_program__is_type(const struct bpf_program *prog,
- enum bpf_prog_type type)
-{
- return prog ? (prog->type == type) : false;
-}
-
-#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
-int bpf_program__set_##NAME(struct bpf_program *prog) \
-{ \
- if (!prog) \
- return libbpf_err(-EINVAL); \
- bpf_program__set_type(prog, TYPE); \
- return 0; \
-} \
- \
-bool bpf_program__is_##NAME(const struct bpf_program *prog) \
-{ \
- return bpf_program__is_type(prog, TYPE); \
-} \
-
-BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
-BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
-BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
-BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
-BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
-BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
-BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
-BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
-BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
-BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
-BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
-BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
-BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
-
__alias(bpf_program__expected_attach_type)
enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
@@ -8566,10 +8364,14 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
return prog->expected_attach_type;
}
-void bpf_program__set_expected_attach_type(struct bpf_program *prog,
+int bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->expected_attach_type = type;
+ return 0;
}
__u32 bpf_program__flags(const struct bpf_program *prog)
@@ -8630,6 +8432,9 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log
}
static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
@@ -8638,83 +8443,87 @@ static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_li
static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static const struct bpf_sec_def section_defs[] = {
- SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("kprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
- SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE),
- SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe),
- SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE),
- SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
- SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE),
+ SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
+ SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
+ SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
+ SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
+ SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
+ SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
+ SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
+ SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
+ SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt),
SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
- SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED),
- SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp),
- SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp),
- SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
- SEC_DEF("raw_tp/", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
- SEC_DEF("raw_tracepoint.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
- SEC_DEF("raw_tp.w/", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
- SEC_DEF("tp_btf/", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
- SEC_DEF("fentry/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
- SEC_DEF("fmod_ret/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
- SEC_DEF("fexit/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
- SEC_DEF("fentry.s/", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
- SEC_DEF("fmod_ret.s/", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
- SEC_DEF("fexit.s/", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
- SEC_DEF("freplace/", EXT, 0, SEC_ATTACH_BTF, attach_trace),
- SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
- SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
- SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
- SEC_DEF("iter.s/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
+ SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE),
+ SEC_DEF("action", SCHED_ACT, 0, SEC_NONE),
+ SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp),
+ SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp),
+ SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
+ SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
+ SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
+ SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
+ SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
- SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
- SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE | SEC_DEPRECATED),
SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
- SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
- SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
- SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
+ SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE),
+ SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE),
+ SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE),
+ SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE),
+ SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE),
+ SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
+ SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
+ SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
+ SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
+ SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
+ SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
+ SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE),
+ SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
- SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
+ SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
};
static size_t custom_sec_def_cnt;
@@ -8809,8 +8618,7 @@ int libbpf_unregister_prog_handler(int handler_id)
return 0;
}
-static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name,
- bool allow_sloppy)
+static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
{
size_t len = strlen(sec_def->sec);
@@ -8835,17 +8643,6 @@ static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_n
return false;
}
- /* SEC_SLOPPY_PFX definitions are allowed to be just prefix
- * matches, unless strict section name mode
- * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the
- * match has to be exact.
- */
- if (allow_sloppy && str_has_pfx(sec_name, sec_def->sec))
- return true;
-
- /* Definitions not marked SEC_SLOPPY_PFX (e.g.,
- * SEC("syscall")) are exact matches in both modes.
- */
return strcmp(sec_name, sec_def->sec) == 0;
}
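With SEC_SLOPPY_PFX gone, any definition without a trailing '/' is an exact match in every mode. A hedged illustration of the user-visible effect (section and function names below are made up):

SEC("xdp")            /* still matches the "xdp" definition exactly */
int pass(struct xdp_md *ctx) { return XDP_PASS; }

SEC("xdp_balancer")   /* formerly a sloppy prefix match for "xdp"; now unrecognized */
int pass2(struct xdp_md *ctx) { return XDP_PASS; }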
@@ -8853,20 +8650,18 @@ static const struct bpf_sec_def *find_sec_def(const char *sec_name)
{
const struct bpf_sec_def *sec_def;
int i, n;
- bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME, allow_sloppy;
n = custom_sec_def_cnt;
for (i = 0; i < n; i++) {
sec_def = &custom_sec_defs[i];
- if (sec_def_matches(sec_def, sec_name, false))
+ if (sec_def_matches(sec_def, sec_name))
return sec_def;
}
n = ARRAY_SIZE(section_defs);
for (i = 0; i < n; i++) {
sec_def = &section_defs[i];
- allow_sloppy = (sec_def->cookie & SEC_SLOPPY_PFX) && !strict;
- if (sec_def_matches(sec_def, sec_name, allow_sloppy))
+ if (sec_def_matches(sec_def, sec_name))
return sec_def;
}
@@ -8937,6 +8732,38 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
return libbpf_err(-ESRCH);
}
+const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
+ return NULL;
+
+ return attach_type_name[t];
+}
+
+const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(link_type_name))
+ return NULL;
+
+ return link_type_name[t];
+}
+
+const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(map_type_name))
+ return NULL;
+
+ return map_type_name[t];
+}
+
+const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
+ return NULL;
+
+ return prog_type_name[t];
+}
+
static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
size_t offset)
{
@@ -9087,6 +8914,7 @@ void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
*kind = BTF_KIND_TYPEDEF;
break;
case BPF_LSM_MAC:
+ case BPF_LSM_CGROUP:
*prefix = BTF_LSM_PREFIX;
*kind = BTF_KIND_FUNC;
break;
@@ -9290,11 +9118,6 @@ int bpf_map__fd(const struct bpf_map *map)
return map ? map->fd : libbpf_err(-EINVAL);
}
-const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
-{
- return map ? &map->def : libbpf_err_ptr(-EINVAL);
-}
-
static bool map_uses_real_name(const struct bpf_map *map)
{
/* Since libbpf started to support custom .data.* and .rodata.* maps,
@@ -9409,27 +9232,6 @@ __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
return map ? map->btf_value_type_id : 0;
}
-int bpf_map__set_priv(struct bpf_map *map, void *priv,
- bpf_map_clear_priv_t clear_priv)
-{
- if (!map)
- return libbpf_err(-EINVAL);
-
- if (map->priv) {
- if (map->clear_priv)
- map->clear_priv(map, map->priv);
- }
-
- map->priv = priv;
- map->clear_priv = clear_priv;
- return 0;
-}
-
-void *bpf_map__priv(const struct bpf_map *map)
-{
- return map ? map->priv : libbpf_err_ptr(-EINVAL);
-}
-
int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size)
{
@@ -9449,11 +9251,6 @@ const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
return map->mmaped;
}
-bool bpf_map__is_offload_neutral(const struct bpf_map *map)
-{
- return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
-}
-
bool bpf_map__is_internal(const struct bpf_map *map)
{
return map->libbpf_type != LIBBPF_MAP_UNSPEC;
@@ -9515,12 +9312,6 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
}
struct bpf_map *
-bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
-{
- return bpf_object__next_map(obj, prev);
-}
-
-struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
if (prev == NULL)
@@ -9530,12 +9321,6 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
}
struct bpf_map *
-bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
-{
- return bpf_object__prev_map(obj, next);
-}
-
-struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
if (next == NULL) {
@@ -9580,126 +9365,126 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}
-struct bpf_map *
-bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
+static int validate_map_op(const struct bpf_map *map, size_t key_sz,
+ size_t value_sz, bool check_value_sz)
{
- return libbpf_err_ptr(-ENOTSUP);
+ if (map->fd <= 0)
+ return -ENOENT;
+
+ if (map->def.key_size != key_sz) {
+ pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
+ map->name, key_sz, map->def.key_size);
+ return -EINVAL;
+ }
+
+ if (!check_value_sz)
+ return 0;
+
+ switch (map->def.type) {
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
+ int num_cpu = libbpf_num_possible_cpus();
+ size_t elem_sz = roundup(map->def.value_size, 8);
+
+ if (value_sz != num_cpu * elem_sz) {
+ pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
+ map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
+ return -EINVAL;
+ }
+ break;
+ }
+ default:
+ if (map->def.value_size != value_sz) {
+ pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
+ map->name, value_sz, map->def.value_size);
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
}
-long libbpf_get_error(const void *ptr)
+int bpf_map__lookup_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags)
{
- if (!IS_ERR_OR_NULL(ptr))
- return 0;
+ int err;
- if (IS_ERR(ptr))
- errno = -PTR_ERR(ptr);
+ err = validate_map_op(map, key_sz, value_sz, true);
+ if (err)
+ return libbpf_err(err);
- /* If ptr == NULL, then errno should be already set by the failing
- * API, because libbpf never returns NULL on success and it now always
- * sets errno on error. So no extra errno handling for ptr == NULL
- * case.
- */
- return -errno;
+ return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
}
-__attribute__((alias("bpf_prog_load_xattr2")))
-int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
- struct bpf_object **pobj, int *prog_fd);
-
-static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
- struct bpf_object **pobj, int *prog_fd)
+int bpf_map__update_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ const void *value, size_t value_sz, __u64 flags)
{
- struct bpf_object_open_attr open_attr = {};
- struct bpf_program *prog, *first_prog = NULL;
- struct bpf_object *obj;
- struct bpf_map *map;
int err;
- if (!attr)
- return libbpf_err(-EINVAL);
- if (!attr->file)
- return libbpf_err(-EINVAL);
+ err = validate_map_op(map, key_sz, value_sz, true);
+ if (err)
+ return libbpf_err(err);
- open_attr.file = attr->file;
- open_attr.prog_type = attr->prog_type;
+ return bpf_map_update_elem(map->fd, key, value, flags);
+}
- obj = __bpf_object__open_xattr(&open_attr, 0);
- err = libbpf_get_error(obj);
+int bpf_map__delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz, __u64 flags)
+{
+ int err;
+
+ err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
if (err)
- return libbpf_err(-ENOENT);
+ return libbpf_err(err);
- bpf_object__for_each_program(prog, obj) {
- enum bpf_attach_type attach_type = attr->expected_attach_type;
- /*
- * to preserve backwards compatibility, bpf_prog_load treats
- * attr->prog_type, if specified, as an override to whatever
- * bpf_object__open guessed
- */
- if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
- bpf_program__set_type(prog, attr->prog_type);
- bpf_program__set_expected_attach_type(prog,
- attach_type);
- }
- if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) {
- /*
- * we haven't guessed from section name and user
- * didn't provide a fallback type, too bad...
- */
- bpf_object__close(obj);
- return libbpf_err(-EINVAL);
- }
+ return bpf_map_delete_elem_flags(map->fd, key, flags);
+}
- prog->prog_ifindex = attr->ifindex;
- prog->log_level = attr->log_level;
- prog->prog_flags |= attr->prog_flags;
- if (!first_prog)
- first_prog = prog;
- }
+int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags)
+{
+ int err;
- bpf_object__for_each_map(map, obj) {
- if (map->def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
- map->map_ifindex = attr->ifindex;
- }
+ err = validate_map_op(map, key_sz, value_sz, true);
+ if (err)
+ return libbpf_err(err);
- if (!first_prog) {
- pr_warn("object file doesn't contain bpf program\n");
- bpf_object__close(obj);
- return libbpf_err(-ENOENT);
- }
+ return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
+}
- err = bpf_object__load(obj);
- if (err) {
- bpf_object__close(obj);
+int bpf_map__get_next_key(const struct bpf_map *map,
+ const void *cur_key, void *next_key, size_t key_sz)
+{
+ int err;
+
+ err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
+ if (err)
return libbpf_err(err);
- }
- *pobj = obj;
- *prog_fd = bpf_program__fd(first_prog);
- return 0;
+ return bpf_map_get_next_key(map->fd, cur_key, next_key);
}
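A hedged usage sketch of the new size-validated accessors (skeleton and map names are hypothetical). For per-CPU maps, value_sz must be libbpf_num_possible_cpus() * roundup(value_size, 8) rather than value_size itself:

	__u32 key = 0, value = 0;
	int err;

	/* sizes are checked against the map definition before the syscall,
	 * turning silent memory corruption into an early -EINVAL
	 */
	err = bpf_map__update_elem(skel->maps.counters, &key, sizeof(key),
				   &value, sizeof(value), BPF_ANY);
	if (!err)
		err = bpf_map__lookup_elem(skel->maps.counters, &key, sizeof(key),
					   &value, sizeof(value), 0);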
-COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1)
-int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd)
+long libbpf_get_error(const void *ptr)
{
- struct bpf_prog_load_attr attr;
+ if (!IS_ERR_OR_NULL(ptr))
+ return 0;
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = file;
- attr.prog_type = type;
- attr.expected_attach_type = 0;
+ if (IS_ERR(ptr))
+ errno = -PTR_ERR(ptr);
- return bpf_prog_load_xattr2(&attr, pobj, prog_fd);
+ /* If ptr == NULL, then errno should be already set by the failing
+ * API, because libbpf never returns NULL on success and it now always
+ * sets errno on error. So no extra errno handling for ptr == NULL
+ * case.
+ */
+ return -errno;
}
-struct bpf_link {
- int (*detach)(struct bpf_link *link);
- void (*dealloc)(struct bpf_link *link);
- char *pin_path; /* NULL, if not pinned */
- int fd; /* hook FD, -1 if not applicable */
- bool disconnected;
-};
-
/* Replace link's underlying BPF program with the new one */
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
@@ -10027,7 +9812,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
{
struct perf_event_attr attr = {};
char errmsg[STRERR_BUFSIZE];
- int type, pfd, err;
+ int type, pfd;
if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
return -EINVAL;
@@ -10063,14 +9848,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
pid < 0 ? -1 : pid /* pid */,
pid == -1 ? 0 : -1 /* cpu */,
-1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
- if (pfd < 0) {
- err = -errno;
- pr_warn("%s perf_event_open() failed: %s\n",
- uprobe ? "uprobe" : "kprobe",
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return err;
- }
- return pfd;
+ return pfd >= 0 ? pfd : -errno;
}
static int append_to_file(const char *file, const char *fmt, ...)
@@ -10093,6 +9871,34 @@ static int append_to_file(const char *file, const char *fmt, ...)
return err;
}
+#define DEBUGFS "/sys/kernel/debug/tracing"
+#define TRACEFS "/sys/kernel/tracing"
+
+static bool use_debugfs(void)
+{
+ static int has_debugfs = -1;
+
+ if (has_debugfs < 0)
+ has_debugfs = access(DEBUGFS, F_OK) == 0;
+
+ return has_debugfs == 1;
+}
+
+static const char *tracefs_path(void)
+{
+ return use_debugfs() ? DEBUGFS : TRACEFS;
+}
+
+static const char *tracefs_kprobe_events(void)
+{
+ return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
+}
+
+static const char *tracefs_uprobe_events(void)
+{
+ return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
+}
+
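These helpers prefer the legacy debugfs mount when it is present and fall back to the newer tracefs mount otherwise. For instance, a legacy kprobe event id file is derived as (the event name is a placeholder):

	char file[256];

	/* /sys/kernel/debug/tracing/events/kprobes/my_probe/id with debugfs,
	 * /sys/kernel/tracing/events/kprobes/my_probe/id without it
	 */
	snprintf(file, sizeof(file), "%s/events/kprobes/%s/id",
		 tracefs_path(), "my_probe");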
static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
const char *kfunc_name, size_t offset)
{
@@ -10105,9 +9911,7 @@ static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
const char *kfunc_name, size_t offset)
{
- const char *file = "/sys/kernel/debug/tracing/kprobe_events";
-
- return append_to_file(file, "%c:%s/%s %s+0x%zx",
+ return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
retprobe ? 'r' : 'p',
retprobe ? "kretprobes" : "kprobes",
probe_name, kfunc_name, offset);
@@ -10115,18 +9919,16 @@ static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
{
- const char *file = "/sys/kernel/debug/tracing/kprobe_events";
-
- return append_to_file(file, "-:%s/%s", retprobe ? "kretprobes" : "kprobes", probe_name);
+ return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
+ retprobe ? "kretprobes" : "kprobes", probe_name);
}
static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
{
char file[256];
- snprintf(file, sizeof(file),
- "/sys/kernel/debug/tracing/events/%s/%s/id",
- retprobe ? "kretprobes" : "kprobes", probe_name);
+ snprintf(file, sizeof(file), "%s/events/%s/%s/id",
+ tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
return parse_uint_from_file(file, "%d\n");
}
@@ -10147,10 +9949,11 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
}
type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
if (type < 0) {
+ err = type;
pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
kfunc_name, offset,
- libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
- return type;
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_clean_legacy;
}
attr.size = sizeof(attr);
attr.config = type;
@@ -10164,9 +9967,72 @@ static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
err = -errno;
pr_warn("legacy kprobe perf_event_open() failed: %s\n",
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return err;
+ goto err_clean_legacy;
}
return pfd;
+
+err_clean_legacy:
+ /* Clear the newly added legacy kprobe_event */
+ remove_kprobe_event_legacy(probe_name, retprobe);
+ return err;
+}
+
+static const char *arch_specific_syscall_pfx(void)
+{
+#if defined(__x86_64__)
+ return "x64";
+#elif defined(__i386__)
+ return "ia32";
+#elif defined(__s390x__)
+ return "s390x";
+#elif defined(__s390__)
+ return "s390";
+#elif defined(__arm__)
+ return "arm";
+#elif defined(__aarch64__)
+ return "arm64";
+#elif defined(__mips__)
+ return "mips";
+#elif defined(__riscv)
+ return "riscv";
+/* check __powerpc64__ first: __powerpc__ is also defined on ppc64 */
+#elif defined(__powerpc64__)
+	return "powerpc64";
+#elif defined(__powerpc__)
+	return "powerpc";
+#else
+ return NULL;
+#endif
+}
+
+static int probe_kern_syscall_wrapper(void)
+{
+ char syscall_name[64];
+ const char *ksys_pfx;
+
+ ksys_pfx = arch_specific_syscall_pfx();
+ if (!ksys_pfx)
+ return 0;
+
+ snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
+
+ if (determine_kprobe_perf_type() >= 0) {
+ int pfd;
+
+ pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
+ if (pfd >= 0)
+ close(pfd);
+
+ return pfd >= 0 ? 1 : 0;
+ } else { /* legacy mode */
+ char probe_name[128];
+
+ gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
+ if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
+ return 0;
+
+ (void)remove_kprobe_event_legacy(probe_name, false);
+ return 1;
+ }
}
struct bpf_link *
@@ -10223,7 +10089,7 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
prog->name, retprobe ? "kretprobe" : "kprobe",
func_name, offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- goto err_out;
+ goto err_clean_legacy;
}
if (legacy) {
struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
@@ -10234,6 +10100,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
}
return link;
+
+err_clean_legacy:
+ if (legacy)
+ remove_kprobe_event_legacy(legacy_probe, retprobe);
err_out:
free(legacy_probe);
return libbpf_err_ptr(err);
@@ -10250,6 +10120,34 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
}
+struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
+ const char *syscall_name,
+ const struct bpf_ksyscall_opts *opts)
+{
+ LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ char func_name[128];
+
+ if (!OPTS_VALID(opts, bpf_ksyscall_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
+		/* arch_specific_syscall_pfx() should never return NULL here
+		 * because it is guarded by kernel_supports(). However, since
+		 * the compiler does not know that, we keep an explicit
+		 * fallback conditional as well.
+		 */
+ snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
+ arch_specific_syscall_pfx() ? : "", syscall_name);
+ } else {
+ snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
+ }
+
+ kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
+ kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
+
+ return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
+}
+
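A hedged usage sketch of the new API (skeleton and program names are hypothetical); SEC("ksyscall/unlinkat") achieves the same thing declaratively:

	LIBBPF_OPTS(bpf_ksyscall_opts, opts, .retprobe = false);
	struct bpf_link *link;

	/* "unlinkat" is expanded to the arch-specific kernel symbol, e.g.
	 * __x64_sys_unlinkat with syscall wrappers, __se_sys_unlinkat without
	 */
	link = bpf_program__attach_ksyscall(skel->progs.handle_unlinkat,
					    "unlinkat", &opts);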
/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
@@ -10391,6 +10289,12 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf
char *func;
int n;
+ *link = NULL;
+
+ /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
+ if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
+ return 0;
+
opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
if (opts.retprobe)
func_name = prog->sec_name + sizeof("kretprobe/") - 1;
@@ -10414,6 +10318,27 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf
return libbpf_get_error(*link);
}
+static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ LIBBPF_OPTS(bpf_ksyscall_opts, opts);
+ const char *syscall_name;
+
+ *link = NULL;
+
+ /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
+ if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
+ return 0;
+
+ opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
+ if (opts.retprobe)
+ syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
+ else
+ syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
+
+ *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
+ return *link ? 0 : -errno;
+}
+
static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
@@ -10421,6 +10346,13 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
char *pattern;
int n;
+ *link = NULL;
+
+ /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
+ if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
+ strcmp(prog->sec_name, "kretprobe.multi") == 0)
+ return 0;
+
opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
if (opts.retprobe)
spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
@@ -10455,9 +10387,7 @@ static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
const char *binary_path, size_t offset)
{
- const char *file = "/sys/kernel/debug/tracing/uprobe_events";
-
- return append_to_file(file, "%c:%s/%s %s:0x%zx",
+ return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
retprobe ? 'r' : 'p',
retprobe ? "uretprobes" : "uprobes",
probe_name, binary_path, offset);
@@ -10465,18 +10395,16 @@ static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
{
- const char *file = "/sys/kernel/debug/tracing/uprobe_events";
-
- return append_to_file(file, "-:%s/%s", retprobe ? "uretprobes" : "uprobes", probe_name);
+ return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
+ retprobe ? "uretprobes" : "uprobes", probe_name);
}
static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
{
char file[512];
- snprintf(file, sizeof(file),
- "/sys/kernel/debug/tracing/events/%s/%s/id",
- retprobe ? "uretprobes" : "uprobes", probe_name);
+ snprintf(file, sizeof(file), "%s/events/%s/%s/id",
+ tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
return parse_uint_from_file(file, "%d\n");
}
@@ -10495,9 +10423,10 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
}
type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
if (type < 0) {
+ err = type;
pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
binary_path, offset, err);
- return type;
+ goto err_clean_legacy;
}
memset(&attr, 0, sizeof(attr));
@@ -10512,9 +10441,262 @@ static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
if (pfd < 0) {
err = -errno;
pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
- return err;
+ goto err_clean_legacy;
}
return pfd;
+
+err_clean_legacy:
+ /* Clear the newly added legacy uprobe_event */
+ remove_uprobe_event_legacy(probe_name, retprobe);
+ return err;
+}
+
+/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
+static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
+{
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ GElf_Shdr sh;
+
+ if (!gelf_getshdr(scn, &sh))
+ continue;
+ if (sh.sh_type == sh_type)
+ return scn;
+ }
+ return NULL;
+}
+
+/* Find offset of function name in object specified by path. "name" matches
+ * symbol name or name@@LIB for library functions.
+ */
+static long elf_find_func_offset(const char *binary_path, const char *name)
+{
+ int fd, i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
+ bool is_shared_lib, is_name_qualified;
+ char errmsg[STRERR_BUFSIZE];
+ long ret = -ENOENT;
+ size_t name_len;
+ GElf_Ehdr ehdr;
+ Elf *elf;
+
+ fd = open(binary_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = -errno;
+ pr_warn("failed to open %s: %s\n", binary_path,
+ libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
+ return ret;
+ }
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
+ close(fd);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ if (!gelf_getehdr(elf, &ehdr)) {
+ pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+ /* for shared lib case, we do not need to calculate relative offset */
+ is_shared_lib = ehdr.e_type == ET_DYN;
+
+ name_len = strlen(name);
+ /* Does name specify "@@LIB"? */
+ is_name_qualified = strstr(name, "@@") != NULL;
+
+ /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
+ * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
+	 * linked binary may not have SHT_DYNSYM, so absence of a section should not be
+ * reported as a warning/error.
+ */
+ for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
+ size_t nr_syms, strtabidx, idx;
+ Elf_Data *symbols = NULL;
+ Elf_Scn *scn = NULL;
+ int last_bind = -1;
+ const char *sname;
+ GElf_Shdr sh;
+
+ scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL);
+ if (!scn) {
+ pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
+ binary_path);
+ continue;
+ }
+ if (!gelf_getshdr(scn, &sh))
+ continue;
+ strtabidx = sh.sh_link;
+ symbols = elf_getdata(scn, 0);
+ if (!symbols) {
+ pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
+ binary_path, elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+ nr_syms = symbols->d_size / sh.sh_entsize;
+
+ for (idx = 0; idx < nr_syms; idx++) {
+ int curr_bind;
+ GElf_Sym sym;
+ Elf_Scn *sym_scn;
+ GElf_Shdr sym_sh;
+
+ if (!gelf_getsym(symbols, idx, &sym))
+ continue;
+
+ if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
+ continue;
+
+ sname = elf_strptr(elf, strtabidx, sym.st_name);
+ if (!sname)
+ continue;
+
+ curr_bind = GELF_ST_BIND(sym.st_info);
+
+ /* User can specify func, func@@LIB or func@@LIB_VERSION. */
+ if (strncmp(sname, name, name_len) != 0)
+ continue;
+			/* ...but we don't want a search for "foo" to match "foo2" also, so any
+ * additional characters in sname should be of the form "@@LIB".
+ */
+ if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@')
+ continue;
+
+ if (ret >= 0) {
+ /* handle multiple matches */
+ if (last_bind != STB_WEAK && curr_bind != STB_WEAK) {
+ /* Only accept one non-weak bind. */
+ pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
+ sname, name, binary_path);
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ } else if (curr_bind == STB_WEAK) {
+ /* already have a non-weak bind, and
+ * this is a weak bind, so ignore.
+ */
+ continue;
+ }
+ }
+
+ /* Transform symbol's virtual address (absolute for
+ * binaries and relative for shared libs) into file
+ * offset, which is what kernel is expecting for
+ * uprobe/uretprobe attachment.
+ * See Documentation/trace/uprobetracer.rst for more
+ * details.
+ * This is done by looking up symbol's containing
+			 * section's header and using its virtual address
+ * (sh_addr) and corresponding file offset (sh_offset)
+ * to transform sym.st_value (virtual address) into
+ * desired final file offset.
+ */
+ sym_scn = elf_getscn(elf, sym.st_shndx);
+ if (!sym_scn)
+ continue;
+ if (!gelf_getshdr(sym_scn, &sym_sh))
+ continue;
+
+ ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset;
+ last_bind = curr_bind;
+ }
+ if (ret > 0)
+ break;
+ }
+
+ if (ret > 0) {
+ pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
+ ret);
+ } else {
+ if (ret == 0) {
+ pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
+ is_shared_lib ? "should not be 0 in a shared library" :
+ "try using shared library path instead");
+ ret = -ENOENT;
+ } else {
+ pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
+ }
+ }
+out:
+ elf_end(elf);
+ close(fd);
+ return ret;
+}
+
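The virtual-address-to-file-offset transformation above is plain arithmetic; a worked example with invented section values:

	/* Hypothetical function symbol in a shared library's .text:
	 *   sym.st_value     = 0x11e9   (virtual address of the function)
	 *   sym_sh.sh_addr   = 0x1000   (virtual address of .text)
	 *   sym_sh.sh_offset = 0x1000   (file offset of .text)
	 * uprobe file offset = 0x11e9 - 0x1000 + 0x1000 = 0x11e9
	 */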
+static const char *arch_specific_lib_paths(void)
+{
+ /*
+ * Based on https://packages.debian.org/sid/libc6.
+ *
+ * Assume that the traced program is built for the same architecture
+ * as libbpf, which should cover the vast majority of cases.
+ */
+#if defined(__x86_64__)
+ return "/lib/x86_64-linux-gnu";
+#elif defined(__i386__)
+ return "/lib/i386-linux-gnu";
+#elif defined(__s390x__)
+ return "/lib/s390x-linux-gnu";
+#elif defined(__s390__)
+ return "/lib/s390-linux-gnu";
+#elif defined(__arm__) && defined(__SOFTFP__)
+ return "/lib/arm-linux-gnueabi";
+#elif defined(__arm__) && !defined(__SOFTFP__)
+ return "/lib/arm-linux-gnueabihf";
+#elif defined(__aarch64__)
+ return "/lib/aarch64-linux-gnu";
+#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
+ return "/lib/mips64el-linux-gnuabi64";
+#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
+ return "/lib/mipsel-linux-gnu";
+#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return "/lib/powerpc64le-linux-gnu";
+#elif defined(__sparc__) && defined(__arch64__)
+ return "/lib/sparc64-linux-gnu";
+#elif defined(__riscv) && __riscv_xlen == 64
+ return "/lib/riscv64-linux-gnu";
+#else
+ return NULL;
+#endif
+}
+
+/* Get full path to program/shared library. */
+static int resolve_full_path(const char *file, char *result, size_t result_sz)
+{
+ const char *search_paths[3] = {};
+ int i;
+
+ if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
+ search_paths[0] = getenv("LD_LIBRARY_PATH");
+ search_paths[1] = "/usr/lib64:/usr/lib";
+ search_paths[2] = arch_specific_lib_paths();
+ } else {
+ search_paths[0] = getenv("PATH");
+ search_paths[1] = "/usr/bin:/usr/sbin";
+ }
+
+ for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
+ const char *s;
+
+ if (!search_paths[i])
+ continue;
+ for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
+ char *next_path;
+ int seg_len;
+
+ if (s[0] == ':')
+ s++;
+ next_path = strchr(s, ':');
+ seg_len = next_path ? next_path - s : strlen(s);
+ if (!seg_len)
+ continue;
+ snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
+ /* ensure it is an executable file/link */
+ if (access(result, R_OK | X_OK) < 0)
+ continue;
+ pr_debug("resolved '%s' to '%s'\n", file, result);
+ return 0;
+ }
+ }
+ return -ENOENT;
}
LIBBPF_API struct bpf_link *
@@ -10524,10 +10706,12 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
{
DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
+ char full_binary_path[PATH_MAX];
struct bpf_link *link;
size_t ref_ctr_off;
int pfd, err;
bool retprobe, legacy;
+ const char *func_name;
if (!OPTS_VALID(opts, bpf_uprobe_opts))
return libbpf_err_ptr(-EINVAL);
@@ -10536,12 +10720,35 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
+ if (!binary_path)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (!strchr(binary_path, '/')) {
+ err = resolve_full_path(binary_path, full_binary_path,
+ sizeof(full_binary_path));
+ if (err) {
+ pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
+ prog->name, binary_path, err);
+ return libbpf_err_ptr(err);
+ }
+ binary_path = full_binary_path;
+ }
+ func_name = OPTS_GET(opts, func_name, NULL);
+ if (func_name) {
+ long sym_off;
+
+ sym_off = elf_find_func_offset(binary_path, func_name);
+ if (sym_off < 0)
+ return libbpf_err_ptr(sym_off);
+ func_offset += sym_off;
+ }
+
legacy = determine_uprobe_perf_type() < 0;
if (!legacy) {
pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
func_offset, pid, ref_ctr_off);
} else {
- char probe_name[512];
+ char probe_name[PATH_MAX + 64];
if (ref_ctr_off)
return libbpf_err_ptr(-EINVAL);
@@ -10573,7 +10780,7 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- goto err_out;
+ goto err_clean_legacy;
}
if (legacy) {
struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
@@ -10583,10 +10790,68 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
perf_link->legacy_is_retprobe = retprobe;
}
return link;
+
+err_clean_legacy:
+ if (legacy)
+ remove_uprobe_event_legacy(legacy_probe, retprobe);
err_out:
free(legacy_probe);
return libbpf_err_ptr(err);
+}
+/* Format of u[ret]probe section definition supporting auto-attach:
+ * u[ret]probe/binary:function[+offset]
+ *
+ * binary can be an absolute/relative path or a filename; the latter is resolved to a
+ * full binary path via bpf_program__attach_uprobe_opts.
+ *
+ * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
+ * specified (and auto-attach is not possible) or the above format is specified for
+ * auto-attach.
+ */
+static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
+ int n, ret = -EINVAL;
+ long offset = 0;
+
+ *link = NULL;
+
+ n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[a-zA-Z0-9_.]+%li",
+ &probe_type, &binary_path, &func_name, &offset);
+ switch (n) {
+ case 1:
+ /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
+ ret = 0;
+ break;
+ case 2:
+ pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
+ prog->name, prog->sec_name);
+ break;
+ case 3:
+ case 4:
+ opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
+ strcmp(probe_type, "uretprobe.s") == 0;
+ if (opts.retprobe && offset != 0) {
+ pr_warn("prog '%s': uretprobes do not support offset specification\n",
+ prog->name);
+ break;
+ }
+ opts.func_name = func_name;
+ *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
+ ret = libbpf_get_error(*link);
+ break;
+ default:
+ pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
+ prog->sec_name);
+ break;
+ }
+ free(probe_type);
+ free(binary_path);
+ free(func_name);
+
+ return ret;
}
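Concretely, the accepted formats look like this on the BPF side (binary and function names are illustrative; BPF_KPROBE/BPF_KRETPROBE are the usual bpf_tracing.h wrappers):

SEC("uprobe/libc.so.6:malloc")          /* filename, resolved to a full path */
int BPF_KPROBE(malloc_entry, size_t size)
{
	return 0;
}

SEC("uretprobe//usr/bin/bash:readline") /* absolute path, return probe */
int BPF_KRETPROBE(readline_exit)
{
	return 0;
}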
struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
@@ -10599,15 +10864,96 @@ struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
}
+struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
+ pid_t pid, const char *binary_path,
+ const char *usdt_provider, const char *usdt_name,
+ const struct bpf_usdt_opts *opts)
+{
+ char resolved_path[512];
+ struct bpf_object *obj = prog->obj;
+ struct bpf_link *link;
+ __u64 usdt_cookie;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_uprobe_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ if (bpf_program__fd(prog) < 0) {
+ pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ if (!binary_path)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (!strchr(binary_path, '/')) {
+ err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
+ if (err) {
+ pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
+ prog->name, binary_path, err);
+ return libbpf_err_ptr(err);
+ }
+ binary_path = resolved_path;
+ }
+
+ /* USDT manager is instantiated lazily on first USDT attach. It will
+ * be destroyed together with BPF object in bpf_object__close().
+ */
+ if (IS_ERR(obj->usdt_man))
+ return libbpf_ptr(obj->usdt_man);
+ if (!obj->usdt_man) {
+ obj->usdt_man = usdt_manager_new(obj);
+ if (IS_ERR(obj->usdt_man))
+ return libbpf_ptr(obj->usdt_man);
+ }
+
+ usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
+ link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
+ usdt_provider, usdt_name, usdt_cookie);
+ err = libbpf_get_error(link);
+ if (err)
+ return libbpf_err_ptr(err);
+ return link;
+}
+
+static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ char *path = NULL, *provider = NULL, *name = NULL;
+ const char *sec_name;
+ int n, err;
+
+ sec_name = bpf_program__section_name(prog);
+ if (strcmp(sec_name, "usdt") == 0) {
+ /* no auto-attach for just SEC("usdt") */
+ *link = NULL;
+ return 0;
+ }
+
+ n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
+ if (n != 3) {
+ pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
+ sec_name);
+ err = -EINVAL;
+ } else {
+ *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
+ provider, name, NULL);
+ err = libbpf_get_error(*link);
+ }
+ free(path);
+ free(provider);
+ free(name);
+ return err;
+}
+
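A hedged sketch of the programmatic counterpart (skeleton and program names are hypothetical):

	struct bpf_link *link;

	/* equivalent to auto-attach via SEC("usdt/libc.so.6:libc:setjmp") */
	link = bpf_program__attach_usdt(skel->progs.handle_setjmp,
					-1 /* any process */, "libc.so.6",
					"libc", "setjmp", NULL);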
static int determine_tracepoint_id(const char *tp_category,
const char *tp_name)
{
char file[PATH_MAX];
int ret;
- ret = snprintf(file, sizeof(file),
- "/sys/kernel/debug/tracing/events/%s/%s/id",
- tp_category, tp_name);
+ ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
+ tracefs_path(), tp_category, tp_name);
if (ret < 0)
return -errno;
if (ret >= sizeof(file)) {
@@ -10694,6 +11040,12 @@ static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_lin
{
char *sec_name, *tp_cat, *tp_name;
+ *link = NULL;
+
+ /* no auto-attach for SEC("tp") or SEC("tracepoint") */
+ if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
+ return 0;
+
sec_name = strdup(prog->sec_name);
if (!sec_name)
return -ENOMEM;
@@ -10749,20 +11101,34 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr
static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
static const char *const prefixes[] = {
- "raw_tp/",
- "raw_tracepoint/",
- "raw_tp.w/",
- "raw_tracepoint.w/",
+ "raw_tp",
+ "raw_tracepoint",
+ "raw_tp.w",
+ "raw_tracepoint.w",
};
size_t i;
const char *tp_name = NULL;
+ *link = NULL;
+
for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
- if (str_has_pfx(prog->sec_name, prefixes[i])) {
- tp_name = prog->sec_name + strlen(prefixes[i]);
- break;
- }
+ size_t pfx_len;
+
+ if (!str_has_pfx(prog->sec_name, prefixes[i]))
+ continue;
+
+ pfx_len = strlen(prefixes[i]);
+ /* no auto-attach case of, e.g., SEC("raw_tp") */
+		/* no auto-attach case, e.g., just SEC("raw_tp") */
+ return 0;
+
+ if (prog->sec_name[pfx_len] != '/')
+ continue;
+
+ tp_name = prog->sec_name + pfx_len + 1;
+ break;
}
+
if (!tp_name) {
pr_warn("prog '%s': invalid section name '%s'\n",
prog->name, prog->sec_name);
@@ -10774,12 +11140,17 @@ static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf
}
/* Common logic for all BPF program types that attach to a btf_id */
-static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog)
+static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
+ const struct bpf_trace_opts *opts)
{
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int prog_fd, pfd;
+ if (!OPTS_VALID(opts, bpf_trace_opts))
+ return libbpf_err_ptr(-EINVAL);
+
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
pr_warn("prog '%s': can't attach before loaded\n", prog->name);
@@ -10791,7 +11162,9 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
- pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
+ /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
+ link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
+ pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
if (pfd < 0) {
pfd = -errno;
free(link);
@@ -10800,17 +11173,23 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
return libbpf_err_ptr(pfd);
}
link->fd = pfd;
- return (struct bpf_link *)link;
+ return link;
}
struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
{
- return bpf_program__attach_btf_id(prog);
+ return bpf_program__attach_btf_id(prog, NULL);
+}
+
+struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
+ const struct bpf_trace_opts *opts)
+{
+ return bpf_program__attach_btf_id(prog, opts);
}
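A hedged usage sketch of the opts variant (the program name is hypothetical); the cookie is later readable inside the program via bpf_get_attach_cookie():

	LIBBPF_OPTS(bpf_trace_opts, opts, .cookie = 0x1234);
	struct bpf_link *link;

	link = bpf_program__attach_trace_opts(skel->progs.fentry_handler, &opts);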
struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
{
- return bpf_program__attach_btf_id(prog);
+ return bpf_program__attach_btf_id(prog, NULL);
}
static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
@@ -11031,6 +11410,9 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
return link;
}
+typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
+ void *private_data);
+
static enum bpf_perf_event_ret
perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
void **copy_mem, size_t *copy_size,
@@ -11079,12 +11461,6 @@ perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
return libbpf_err(ret);
}
-__attribute__((alias("perf_event_read_simple")))
-enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
- void **copy_mem, size_t *copy_size,
- bpf_perf_event_print_t fn, void *private_data);
-
struct perf_buffer;
struct perf_buffer_params {
@@ -11218,12 +11594,11 @@ error:
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p);
-DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0)
-struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
- perf_buffer_sample_fn sample_cb,
- perf_buffer_lost_fn lost_cb,
- void *ctx,
- const struct perf_buffer_opts *opts)
+struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
+ perf_buffer_sample_fn sample_cb,
+ perf_buffer_lost_fn lost_cb,
+ void *ctx,
+ const struct perf_buffer_opts *opts)
{
struct perf_buffer_params p = {};
struct perf_event_attr attr = {};
@@ -11245,22 +11620,10 @@ struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
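With the compat shims below removed, this callback-based signature is the only perf_buffer__new(). A minimal usage sketch (callback and map names are placeholders):

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* handle one event record */
}

	struct perf_buffer *pb;

	pb = perf_buffer__new(bpf_map__fd(skel->maps.events),
			      8 /* pages per CPU ring */,
			      on_sample, NULL /* lost_cb */,
			      NULL /* ctx */, NULL /* opts */);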
-COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4)
-struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
- const struct perf_buffer_opts *opts)
-{
- return perf_buffer__new_v0_6_0(map_fd, page_cnt,
- opts ? opts->sample_cb : NULL,
- opts ? opts->lost_cb : NULL,
- opts ? opts->ctx : NULL,
- NULL);
-}
-
-DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0)
-struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
- struct perf_event_attr *attr,
- perf_buffer_event_fn event_cb, void *ctx,
- const struct perf_buffer_raw_opts *opts)
+struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
+ struct perf_event_attr *attr,
+ perf_buffer_event_fn event_cb, void *ctx,
+ const struct perf_buffer_raw_opts *opts)
{
struct perf_buffer_params p = {};
@@ -11280,20 +11643,6 @@ struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
-COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4)
-struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
- const struct perf_buffer_raw_opts *opts)
-{
- LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts,
- .cpu_cnt = opts->cpu_cnt,
- .cpus = opts->cpus,
- .map_keys = opts->map_keys,
- );
-
- return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr,
- opts->event_cb, opts->ctx, &inner_opts);
-}
-
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p)
{
@@ -11554,6 +11903,22 @@ int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
return cpu_buf->fd;
}
+int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
+{
+ struct perf_cpu_buf *cpu_buf;
+
+ if (buf_idx >= pb->cpu_cnt)
+ return libbpf_err(-EINVAL);
+
+ cpu_buf = pb->cpu_bufs[buf_idx];
+ if (!cpu_buf)
+ return libbpf_err(-ENOENT);
+
+ *buf = cpu_buf->base;
+ *buf_size = pb->mmap_size;
+ return 0;
+}
+
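A sketch of the new accessor for direct inspection of one per-CPU ring (pb is an existing struct perf_buffer *):

	void *buf;
	size_t buf_size;
	int err;

	/* raw mmap'ed region of ring 0: a struct perf_event_mmap_page header
	 * followed by the data pages; useful for custom consumption logic
	 */
	err = perf_buffer__buffer(pb, 0, &buf, &buf_size);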
/*
* Consume data from perf ring buffer corresponding to slot *buf_idx* in
* PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
@@ -11595,254 +11960,6 @@ int perf_buffer__consume(struct perf_buffer *pb)
return 0;
}
-struct bpf_prog_info_array_desc {
- int array_offset; /* e.g. offset of jited_prog_insns */
- int count_offset; /* e.g. offset of jited_prog_len */
- int size_offset; /* > 0: offset of rec size,
- * < 0: fix size of -size_offset
- */
-};
-
-static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
- [BPF_PROG_INFO_JITED_INSNS] = {
- offsetof(struct bpf_prog_info, jited_prog_insns),
- offsetof(struct bpf_prog_info, jited_prog_len),
- -1,
- },
- [BPF_PROG_INFO_XLATED_INSNS] = {
- offsetof(struct bpf_prog_info, xlated_prog_insns),
- offsetof(struct bpf_prog_info, xlated_prog_len),
- -1,
- },
- [BPF_PROG_INFO_MAP_IDS] = {
- offsetof(struct bpf_prog_info, map_ids),
- offsetof(struct bpf_prog_info, nr_map_ids),
- -(int)sizeof(__u32),
- },
- [BPF_PROG_INFO_JITED_KSYMS] = {
- offsetof(struct bpf_prog_info, jited_ksyms),
- offsetof(struct bpf_prog_info, nr_jited_ksyms),
- -(int)sizeof(__u64),
- },
- [BPF_PROG_INFO_JITED_FUNC_LENS] = {
- offsetof(struct bpf_prog_info, jited_func_lens),
- offsetof(struct bpf_prog_info, nr_jited_func_lens),
- -(int)sizeof(__u32),
- },
- [BPF_PROG_INFO_FUNC_INFO] = {
- offsetof(struct bpf_prog_info, func_info),
- offsetof(struct bpf_prog_info, nr_func_info),
- offsetof(struct bpf_prog_info, func_info_rec_size),
- },
- [BPF_PROG_INFO_LINE_INFO] = {
- offsetof(struct bpf_prog_info, line_info),
- offsetof(struct bpf_prog_info, nr_line_info),
- offsetof(struct bpf_prog_info, line_info_rec_size),
- },
- [BPF_PROG_INFO_JITED_LINE_INFO] = {
- offsetof(struct bpf_prog_info, jited_line_info),
- offsetof(struct bpf_prog_info, nr_jited_line_info),
- offsetof(struct bpf_prog_info, jited_line_info_rec_size),
- },
- [BPF_PROG_INFO_PROG_TAGS] = {
- offsetof(struct bpf_prog_info, prog_tags),
- offsetof(struct bpf_prog_info, nr_prog_tags),
- -(int)sizeof(__u8) * BPF_TAG_SIZE,
- },
-
-};
-
-static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
- int offset)
-{
- __u32 *array = (__u32 *)info;
-
- if (offset >= 0)
- return array[offset / sizeof(__u32)];
- return -(int)offset;
-}
-
-static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
- int offset)
-{
- __u64 *array = (__u64 *)info;
-
- if (offset >= 0)
- return array[offset / sizeof(__u64)];
- return -(int)offset;
-}
-
-static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
- __u32 val)
-{
- __u32 *array = (__u32 *)info;
-
- if (offset >= 0)
- array[offset / sizeof(__u32)] = val;
-}
-
-static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
- __u64 val)
-{
- __u64 *array = (__u64 *)info;
-
- if (offset >= 0)
- array[offset / sizeof(__u64)] = val;
-}
-
-struct bpf_prog_info_linear *
-bpf_program__get_prog_info_linear(int fd, __u64 arrays)
-{
- struct bpf_prog_info_linear *info_linear;
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- __u32 data_len = 0;
- int i, err;
- void *ptr;
-
- if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
- return libbpf_err_ptr(-EINVAL);
-
- /* step 1: get array dimensions */
- err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
- if (err) {
- pr_debug("can't get prog info: %s", strerror(errno));
- return libbpf_err_ptr(-EFAULT);
- }
-
- /* step 2: calculate total size of all arrays */
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- bool include_array = (arrays & (1UL << i)) > 0;
- struct bpf_prog_info_array_desc *desc;
- __u32 count, size;
-
- desc = bpf_prog_info_array_desc + i;
-
- /* kernel is too old to support this field */
- if (info_len < desc->array_offset + sizeof(__u32) ||
- info_len < desc->count_offset + sizeof(__u32) ||
- (desc->size_offset > 0 && info_len < desc->size_offset))
- include_array = false;
-
- if (!include_array) {
- arrays &= ~(1UL << i); /* clear the bit */
- continue;
- }
-
- count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
-
- data_len += count * size;
- }
-
- /* step 3: allocate continuous memory */
- data_len = roundup(data_len, sizeof(__u64));
- info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
- if (!info_linear)
- return libbpf_err_ptr(-ENOMEM);
-
- /* step 4: fill data to info_linear->info */
- info_linear->arrays = arrays;
- memset(&info_linear->info, 0, sizeof(info));
- ptr = info_linear->data;
-
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u32 count, size;
-
- if ((arrays & (1UL << i)) == 0)
- continue;
-
- desc = bpf_prog_info_array_desc + i;
- count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
- bpf_prog_info_set_offset_u32(&info_linear->info,
- desc->count_offset, count);
- bpf_prog_info_set_offset_u32(&info_linear->info,
- desc->size_offset, size);
- bpf_prog_info_set_offset_u64(&info_linear->info,
- desc->array_offset,
- ptr_to_u64(ptr));
- ptr += count * size;
- }
-
- /* step 5: call syscall again to get required arrays */
- err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
- if (err) {
- pr_debug("can't get prog info: %s", strerror(errno));
- free(info_linear);
- return libbpf_err_ptr(-EFAULT);
- }
-
- /* step 6: verify the data */
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u32 v1, v2;
-
- if ((arrays & (1UL << i)) == 0)
- continue;
-
- desc = bpf_prog_info_array_desc + i;
- v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
- desc->count_offset);
- if (v1 != v2)
- pr_warn("%s: mismatch in element count\n", __func__);
-
- v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
- desc->size_offset);
- if (v1 != v2)
- pr_warn("%s: mismatch in rec size\n", __func__);
- }
-
- /* step 7: update info_len and data_len */
- info_linear->info_len = sizeof(struct bpf_prog_info);
- info_linear->data_len = data_len;
-
- return info_linear;
-}
-
-void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
-{
- int i;
-
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u64 addr, offs;
-
- if ((info_linear->arrays & (1UL << i)) == 0)
- continue;
-
- desc = bpf_prog_info_array_desc + i;
- addr = bpf_prog_info_read_offset_u64(&info_linear->info,
- desc->array_offset);
- offs = addr - ptr_to_u64(info_linear->data);
- bpf_prog_info_set_offset_u64(&info_linear->info,
- desc->array_offset, offs);
- }
-}
-
-void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
-{
- int i;
-
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u64 addr, offs;
-
- if ((info_linear->arrays & (1UL << i)) == 0)
- continue;
-
- desc = bpf_prog_info_array_desc + i;
- offs = bpf_prog_info_read_offset_u64(&info_linear->info,
- desc->array_offset);
- addr = offs + ptr_to_u64(info_linear->data);
- bpf_prog_info_set_offset_u64(&info_linear->info,
- desc->array_offset, addr);
- }
-}
-
int bpf_program__set_attach_target(struct bpf_program *prog,
int attach_prog_fd,
const char *attach_func_name)
@@ -12211,7 +12328,7 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
- if (!prog->load)
+ if (!prog->autoload)
continue;
/* auto-attaching not supported for this program */
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 05dde85e19a6..61493c4cddac 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -51,6 +51,42 @@ enum libbpf_errno {
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
+/**
+ * @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
+ * value into a textual representation.
+ * @param t The attach type.
+ * @return Pointer to a static string identifying the attach type. NULL is
+ * returned for unknown **bpf_attach_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);
+
+/**
+ * @brief **libbpf_bpf_link_type_str()** converts the provided link type value
+ * into a textual representation.
+ * @param t The link type.
+ * @return Pointer to a static string identifying the link type. NULL is
+ * returned for unknown **bpf_link_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);
+
+/**
+ * @brief **libbpf_bpf_map_type_str()** converts the provided map type value
+ * into a textual representation.
+ * @param t The map type.
+ * @return Pointer to a static string identifying the map type. NULL is
+ * returned for unknown **bpf_map_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);
+
+/**
+ * @brief **libbpf_bpf_prog_type_str()** converts the provided program type
+ * value into a textual representation.
+ * @param t The program type.
+ * @return Pointer to a static string identifying the program type. NULL is
+ * returned for unknown **bpf_prog_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);
+
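For example, a consumer can print kernel object types without keeping its own name tables (a minimal sketch; info is assumed to be a struct bpf_prog_info already filled via bpf_obj_get_info_by_fd()):

	const char *s = libbpf_bpf_prog_type_str(info.type);

	printf("prog type: %s\n", s ?: "UNKNOWN");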
enum libbpf_print_level {
LIBBPF_WARN,
LIBBPF_INFO,
@@ -65,13 +101,8 @@ LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
struct bpf_object;
-struct bpf_object_open_attr {
- const char *file;
- enum bpf_prog_type prog_type;
-};
-
struct bpf_object_open_opts {
- /* size of this struct, for forward/backward compatiblity */
+ /* size of this struct, for forward/backward compatibility */
size_t sz;
/* object name override, if provided:
* - for object open from file, this will override setting object
@@ -82,21 +113,12 @@ struct bpf_object_open_opts {
const char *object_name;
/* parse map definitions non-strictly, allowing extra attributes/data */
bool relaxed_maps;
- /* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures.
- * Value is ignored. Relocations always are processed non-strictly.
- * Non-relocatable instructions are replaced with invalid ones to
- * prevent accidental errors.
- * */
- LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect")
- bool relaxed_core_relocs;
/* maps that set the 'pinning' attribute in their definition will have
* their pin_path attribute set to a file in this directory, and be
* auto-pinned to that path on load; defaults to "/sys/fs/bpf".
*/
const char *pin_root_path;
-
- LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program")
- __u32 attach_prog_fd;
+ long :0;
/* Additional kernel config content that augments and overrides
* system Kconfig for CONFIG_xxx externs.
*/
@@ -179,20 +201,10 @@ LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts);
-/* deprecated bpf_object__open variants */
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open_mem() instead")
-LIBBPF_API struct bpf_object *
-bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
- const char *name);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open_file() instead")
-LIBBPF_API struct bpf_object *
-bpf_object__open_xattr(struct bpf_object_open_attr *attr);
+/* Load/unload object into/from kernel */
+LIBBPF_API int bpf_object__load(struct bpf_object *obj);
-enum libbpf_pin_type {
- LIBBPF_PIN_NONE,
- /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
- LIBBPF_PIN_BY_NAME,
-};
+LIBBPF_API void bpf_object__close(struct bpf_object *object);
/* pin_maps and unpin_maps can both be called with a NULL path, in which case
* they will use the pin_path attribute of each map (and ignore all maps that
@@ -206,20 +218,6 @@ LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
-LIBBPF_API void bpf_object__close(struct bpf_object *object);
-
-struct bpf_object_load_attr {
- struct bpf_object *obj;
- int log_level;
- const char *target_btf_path;
-};
-
-/* Load/unload object into/from kernel */
-LIBBPF_API int bpf_object__load(struct bpf_object *obj);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__load() instead")
-LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
-LIBBPF_DEPRECATED_SINCE(0, 6, "bpf_object__unload() is deprecated, use bpf_object__close() instead")
-LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
@@ -229,29 +227,10 @@ struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__find_program_by_name() instead")
-LIBBPF_API struct bpf_program *
-bpf_object__find_program_by_title(const struct bpf_object *obj,
- const char *title);
LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
const char *name);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "track bpf_objects in application code instead")
-struct bpf_object *bpf_object__next(struct bpf_object *prev);
-#define bpf_object__for_each_safe(pos, tmp) \
- for ((pos) = bpf_object__next(NULL), \
- (tmp) = bpf_object__next(pos); \
- (pos) != NULL; \
- (pos) = (tmp), (tmp) = bpf_object__next(tmp))
-
-typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
-LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
-LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
- bpf_object_clear_priv_t clear_priv);
-LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
-LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);
-
LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type);
@@ -262,9 +241,7 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
/* Accessors of bpf_program */
struct bpf_program;
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_program() instead")
-struct bpf_program *bpf_program__next(struct bpf_program *prog,
- const struct bpf_object *obj);
+
LIBBPF_API struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);
@@ -273,33 +250,17 @@ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog)
(pos) != NULL; \
(pos) = bpf_object__next_program((obj), (pos)))
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_program() instead")
-struct bpf_program *bpf_program__prev(struct bpf_program *prog,
- const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);
-typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);
-
-LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
-LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
- bpf_program_clear_priv_t clear_priv);
-LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
-LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
__u32 ifindex);
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
-LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead")
-const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
-/* returns program size in bytes */
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insn_cnt() instead")
-LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
-
struct bpf_insn;
/**
@@ -323,6 +284,24 @@ struct bpf_insn;
* different.
*/
LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);
+
+/**
+ * @brief **bpf_program__set_insns()** can set BPF program's underlying
+ * BPF instructions.
+ *
+ * WARNING: This is a very advanced libbpf API and users need to know
+ * what they are doing. This should be used from prog_prepare_load_fn
+ * callback only.
+ *
+ * @param prog BPF program to set instructions for
+ * @param new_insns a pointer to an array of BPF instructions
+ * @param new_insn_cnt number of `struct bpf_insn`'s that form
+ * specified BPF program
+ * @return 0, on success; negative error code, otherwise
+ */
+LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
+ struct bpf_insn *new_insns, size_t new_insn_cnt);
+
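A deliberately trivial sketch of the call pattern; the assumptions here are that this runs from a custom section handler's prog_prepare_load_fn callback and that libbpf copies the instructions internally, so the caller keeps ownership of the buffer:

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <bpf/libbpf.h>

static int rewrite_insns(struct bpf_program *prog)
{
	size_t cnt = bpf_program__insn_cnt(prog);
	struct bpf_insn *copy;
	int err;

	copy = calloc(cnt, sizeof(*copy));
	if (!copy)
		return -ENOMEM;
	memcpy(copy, bpf_program__insns(prog), cnt * sizeof(*copy));
	/* ... rewrite instructions in 'copy' here ... */
	err = bpf_program__set_insns(prog, copy, cnt);
	free(copy); /* assumption: set_insns() made its own copy */
	return err;
}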
/**
* @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
* that form specified BPF program.
@@ -334,17 +313,7 @@ LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *p
*/
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead")
-LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
-LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
- const char *path,
- int instance);
-LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
-LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
- const char *path,
- int instance);
/**
* @brief **bpf_program__pin()** pins the BPF program to a file
@@ -378,7 +347,31 @@ struct bpf_link;
LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
+/**
+ * @brief **bpf_link__pin()** pins the BPF link to a file
+ * in the BPF FS specified by a path. This increments the link's
+ * reference count, allowing it to stay loaded after the process
+ * which loaded it has exited.
+ *
+ * @param link BPF link to pin, must already be loaded
+ * @param path file path in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
+
+/**
+ * @brief **bpf_link__unpin()** unpins the BPF link from a file
+ * in the BPF FS specified by a path. This decrements the link's
+ * reference count.
+ *
+ * The file pinning the BPF link can also be unlinked by a different
+ * process in which case this function will return an error.
+ *
+ * @param link BPF link to unpin
+ * @param path file path to the pin in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
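Taken together, a short sketch of the pin/unpin life cycle; the pin path is illustrative:

#include <bpf/libbpf.h>

static int pin_cycle(struct bpf_link *link)
{
	int err;

	err = bpf_link__pin(link, "/sys/fs/bpf/my_link"); /* illustrative path */
	if (err)
		return err;
	/* the link now outlives the process until the pin file is removed */
	return bpf_link__unpin(link);
}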
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
struct bpf_program *prog);
@@ -386,6 +379,22 @@ LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
+/**
+ * @brief **bpf_program__attach()** is a generic function for attaching
+ * a BPF program based on auto-detection of program type, attach type,
+ * and extra parameters, where applicable.
+ *
+ * @param prog BPF program to attach
+ * @return Reference to the newly created BPF link; or NULL is returned on
+ * error, with the error code stored in errno
+ *
+ * This is supported for:
+ * - kprobe/kretprobe (depends on SEC() definition)
+ * - uprobe/uretprobe (depends on SEC() definition)
+ * - tracepoint
+ * - raw tracepoint
+ * - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
+ */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);
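A minimal sketch of the NULL-plus-errno convention described above:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int attach_one(struct bpf_program *prog)
{
	struct bpf_link *link;

	link = bpf_program__attach(prog);
	if (!link) {
		int err = -errno;

		fprintf(stderr, "failed to attach %s: %d\n",
			bpf_program__name(prog), err);
		return err;
	}
	/* ... run workload ... */
	return bpf_link__destroy(link);
}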
@@ -448,6 +457,52 @@ bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
const char *pattern,
const struct bpf_kprobe_multi_opts *opts);
+struct bpf_ksyscall_opts {
+	/* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 bpf_cookie;
+ /* attach as return probe? */
+ bool retprobe;
+ size_t :0;
+};
+#define bpf_ksyscall_opts__last_field retprobe
+
+/**
+ * @brief **bpf_program__attach_ksyscall()** attaches a BPF program
+ * to the kernel syscall handler of a specified syscall. Optionally it's
+ * possible to request a retprobe that will be triggered at syscall exit. It's
+ * also possible to associate a BPF cookie (through options).
+ *
+ * Libbpf will automatically determine the correct full kernel function name,
+ * which, depending on system architecture and kernel version/configuration,
+ * could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
+ * attach the specified program using the kprobe/kretprobe mechanism.
+ *
+ * **bpf_program__attach_ksyscall()** is an API counterpart of declarative
+ * **SEC("ksyscall/<syscall>")** annotation of BPF programs.
+ *
+ * At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
+ * not handle all the calling convention quirks for mmap(), clone() and compat
+ * syscalls. It also only attaches to "native" syscall interfaces. If the host
+ * system supports compat syscalls or defines 32-bit syscalls in a 64-bit
+ * kernel, such syscall interfaces won't be attached to by libbpf.
+ *
+ * These limitations may or may not change in the future. Therefore it is
+ * recommended to use SEC("kprobe") for these syscalls or if working with
+ * compat and 32-bit interfaces is required.
+ *
+ * @param prog BPF program to attach
+ * @param syscall_name Symbolic name of the syscall (e.g., "bpf")
+ * @param opts Additional options (see **struct bpf_ksyscall_opts**)
+ * @return Reference to the newly created BPF link; or NULL is returned on
+ * error, with the error code stored in errno
+ */
+LIBBPF_API struct bpf_link *
+bpf_program__attach_ksyscall(const struct bpf_program *prog,
+ const char *syscall_name,
+ const struct bpf_ksyscall_opts *opts);
+
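A sketch of the options-carrying call, assuming a program written against the syscall handler's signature:

#include <bpf/libbpf.h>

static struct bpf_link *attach_bpf_syscall(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_ksyscall_opts, opts,
		.retprobe = true,     /* trigger at syscall exit */
		.bpf_cookie = 0x1234, /* readable via bpf_get_attach_cookie() */
	);

	/* returns NULL on error, with the error code stored in errno */
	return bpf_program__attach_ksyscall(prog, "bpf", &opts);
}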
struct bpf_uprobe_opts {
/* size of this struct, for forward/backward compatiblity */
size_t sz;
@@ -459,9 +514,17 @@ struct bpf_uprobe_opts {
__u64 bpf_cookie;
/* uprobe is return probe, invoked at function return time */
bool retprobe;
+ /* Function name to attach to. Could be an unqualified ("abc") or library-qualified
+ * "abc@LIBXYZ" name. To specify function entry, func_name should be set while
+	 * func_offset argument to bpf_program__attach_uprobe_opts() should be 0. To trace an
+ * offset within a function, specify func_name and use func_offset argument to specify
+ * offset within the function. Shared library functions must specify the shared library
+ * binary_path.
+ */
+ const char *func_name;
size_t :0;
};
-#define bpf_uprobe_opts__last_field retprobe
+#define bpf_uprobe_opts__last_field func_name
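A sketch of attaching by name rather than raw offset via the new func_name field; the library and function names are illustrative:

#include <bpf/libbpf.h>

static struct bpf_link *attach_malloc_uprobe(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.func_name = "malloc", /* resolved by libbpf; func_offset stays 0 */
	);

	return bpf_program__attach_uprobe_opts(prog, -1 /* all processes */,
					       "libc.so.6", 0 /* func_offset */,
					       &opts);
}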
/**
* @brief **bpf_program__attach_uprobe()** attaches a BPF program
@@ -503,6 +566,37 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
const char *binary_path, size_t func_offset,
const struct bpf_uprobe_opts *opts);
+struct bpf_usdt_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value accessible through usdt_cookie() */
+ __u64 usdt_cookie;
+ size_t :0;
+};
+#define bpf_usdt_opts__last_field usdt_cookie
+
+/**
+ * @brief **bpf_program__attach_usdt()** is just like
+ * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
+ * Statically Defined Tracepoint) attachment, instead of attaching to
+ * user-space function entry or exit.
+ *
+ * @param prog BPF program to attach
+ * @param pid Process ID to attach the USDT probe to, 0 for self (own process),
+ * -1 for all processes
+ * @param binary_path Path to binary that contains provided USDT probe
+ * @param usdt_provider USDT provider name
+ * @param usdt_name USDT probe name
+ * @param opts Options for altering program attachment
+ * @return Reference to the newly created BPF link; or NULL is returned on
+ * error, with the error code stored in errno
+ */
+LIBBPF_API struct bpf_link *
+bpf_program__attach_usdt(const struct bpf_program *prog,
+ pid_t pid, const char *binary_path,
+ const char *usdt_provider, const char *usdt_name,
+ const struct bpf_usdt_opts *opts);
+
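A sketch with a hypothetical binary, provider, and probe name:

#include <bpf/libbpf.h>

static struct bpf_link *attach_request_start(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_usdt_opts, opts,
		.usdt_cookie = 42, /* exposed to the BPF side via the USDT cookie */
	);

	/* binary path, provider, and probe names are hypothetical */
	return bpf_program__attach_usdt(prog, -1 /* all processes */,
					"/usr/bin/myapp", "myapp",
					"request__start", &opts);
}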
struct bpf_tracepoint_opts {
/* size of this struct, for forward/backward compatiblity */
size_t sz;
@@ -524,9 +618,21 @@ bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
const char *tp_name);
+
+struct bpf_trace_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 cookie;
+};
+#define bpf_trace_opts__last_field cookie
+
LIBBPF_API struct bpf_link *
bpf_program__attach_trace(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
+bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts);
+
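A sketch of the cookie-carrying variant for fentry/fexit-style programs:

#include <bpf/libbpf.h>

static struct bpf_link *attach_with_cookie(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_trace_opts, opts,
		.cookie = 0xdeadbeef, /* readable via bpf_get_attach_cookie() */
	);

	return bpf_program__attach_trace_opts(prog, &opts);
}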
+LIBBPF_API struct bpf_link *
bpf_program__attach_lsm(const struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
@@ -553,106 +659,38 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_iter(const struct bpf_program *prog,
const struct bpf_iter_attach_opts *opts);
-/*
- * Libbpf allows callers to adjust BPF programs before being loaded
- * into kernel. One program in an object file can be transformed into
- * multiple variants to be attached to different hooks.
- *
- * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
- * form an API for this purpose.
- *
- * - bpf_program_prep_t:
- * Defines a 'preprocessor', which is a caller defined function
- * passed to libbpf through bpf_program__set_prep(), and will be
- * called before program is loaded. The processor should adjust
- * the program one time for each instance according to the instance id
- * passed to it.
- *
- * - bpf_program__set_prep:
- * Attaches a preprocessor to a BPF program. The number of instances
- * that should be created is also passed through this function.
- *
- * - bpf_program__nth_fd:
- * After the program is loaded, get resulting FD of a given instance
- * of the BPF program.
- *
- * If bpf_program__set_prep() is not used, the program would be loaded
- * without adjustment during bpf_object__load(). The program has only
- * one instance. In this case bpf_program__fd(prog) is equal to
- * bpf_program__nth_fd(prog, 0).
- */
-struct bpf_prog_prep_result {
- /*
- * If not NULL, load new instruction array.
- * If set to NULL, don't load this instance.
- */
- struct bpf_insn *new_insn_ptr;
- int new_insn_cnt;
-
- /* If not NULL, result FD is written to it. */
- int *pfd;
-};
+LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);
-/*
- * Parameters of bpf_program_prep_t:
- * - prog: The bpf_program being loaded.
- * - n: Index of instance being generated.
- * - insns: BPF instructions array.
- * - insns_cnt:Number of instructions in insns.
- * - res: Output parameter, result of transformation.
+/**
+ * @brief **bpf_program__set_type()** sets the program
+ * type of the passed BPF program.
+ * @param prog BPF program to set the program type for
+ * @param type program type to set for the BPF program
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
*
- * Return value:
- * - Zero: pre-processing success.
- * - Non-zero: pre-processing error, stop loading.
- */
-typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
- struct bpf_insn *insns, int insns_cnt,
- struct bpf_prog_prep_result *res);
-
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
-LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
- bpf_program_prep_t prep);
-
-LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
-LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);
-
-/*
- * Adjust type of BPF program. Default is kprobe.
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
*/
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
-LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);
-
-LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);
-LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
- enum bpf_prog_type type);
+LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
+ enum bpf_prog_type type);
LIBBPF_API enum bpf_attach_type
bpf_program__expected_attach_type(const struct bpf_program *prog);
-LIBBPF_API void
+
+/**
+ * @brief **bpf_program__set_expected_attach_type()** sets the
+ * attach type of the passed BPF program. This is used for
+ * auto-detection of attachment when programs are loaded.
+ * @param prog BPF program to set the attach type for
+ * @param type attach type to set for the BPF program
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
+ *
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
+ */
+LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
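Both setters must run between bpf_object__open() and bpf_object__load(); a sketch with illustrative types:

#include <bpf/libbpf.h>

static int force_connect4(struct bpf_program *prog)
{
	int err;

	/* only valid before bpf_object__load() */
	err = bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR);
	if (err)
		return err;
	return bpf_program__set_expected_attach_type(prog,
						     BPF_CGROUP_INET4_CONNECT);
}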
@@ -668,51 +706,21 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
+/**
+ * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
+ * for supported BPF program types:
+ * - BTF-aware raw tracepoints (tp_btf);
+ * - fentry/fexit/fmod_ret;
+ * - lsm;
+ * - freplace.
+ * @param prog BPF program to set the attach target for
+ * @param attach_prog_fd FD of the target BPF program to attach to, or zero
+ * to attach to a kernel function identified by BTF
+ * @param attach_func_name name of the kernel function or target program's
+ * function to attach to
+ * @return error code; or 0 if no error occurred.
+ */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
const char *attach_func_name);
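For instance, retargeting an fentry program before load; the kernel function name is illustrative:

#include <bpf/libbpf.h>

static int retarget(struct bpf_program *prog)
{
	/* attach_prog_fd == 0: resolve the name against kernel BTF */
	return bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
}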
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead")
-LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog);
-
-/*
- * No need for __attribute__((packed)), all members of 'bpf_map_def'
- * are all aligned. In addition, using __attribute__((packed))
- * would trigger a -Wpacked warning message, and lead to an error
- * if -Werror is set.
- */
-struct bpf_map_def {
- unsigned int type;
- unsigned int key_size;
- unsigned int value_size;
- unsigned int max_entries;
- unsigned int map_flags;
-};
-
/**
* @brief **bpf_object__find_map_by_name()** returns BPF map of
* the given name, if it exists within the passed BPF object
@@ -727,16 +735,6 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);
-/*
- * Get bpf_map through the offset of corresponding struct bpf_map_def
- * in the BPF object file.
- */
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__find_map_by_name() instead")
-struct bpf_map *
-bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
-
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__next_map() instead")
-struct bpf_map *bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
@@ -746,12 +744,32 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
(pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__prev_map() instead")
-struct bpf_map *bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
LIBBPF_API struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
/**
+ * @brief **bpf_map__set_autocreate()** sets whether libbpf has to auto-create
+ * the BPF map during the BPF object load phase.
+ * @param map the BPF map instance
+ * @param autocreate whether to create BPF map during BPF object load
+ * @return 0 on success; -EBUSY if BPF object was already loaded
+ *
+ * **bpf_map__set_autocreate()** allows the caller to opt out of libbpf
+ * auto-creating a BPF map. By default, libbpf will attempt to create every
+ * single BPF map defined in a BPF object file using the BPF_MAP_CREATE
+ * command of the bpf() syscall and fill in the map FD in BPF instructions.
+ *
+ * This API allows opting out of this process for a specific map instance.
+ * This can be useful if the host kernel doesn't support such a BPF map type
+ * or the used combination of flags, and the user application wants to avoid
+ * creating such a map in the first place. The user is still responsible for
+ * making sure that their BPF-side code that expects to use such a missing
+ * BPF map is recognized by the BPF verifier as dead code, otherwise the
+ * verifier will reject the BPF program.
+ */
+LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
+LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
+
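A sketch of opting a single map out before load; the map name is illustrative:

#include <errno.h>
#include <bpf/libbpf.h>

static int skip_optional_map(struct bpf_object *obj)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, "optional_map");
	if (!map)
		return -ESRCH;
	/* must happen before bpf_object__load(); fails with -EBUSY afterwards */
	return bpf_map__set_autocreate(map, false);
}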
+/**
* @brief **bpf_map__fd()** gets the file descriptor of the passed
* BPF map
* @param map the BPF map instance
@@ -759,9 +777,6 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
*/
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
-/* get map definition */
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead")
-const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */
@@ -770,8 +785,6 @@ LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
/* get/set map size (max_entries) */
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__set_max_entries() instead")
-LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
/* get/set map flags */
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
@@ -794,17 +807,9 @@ LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
-typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
-LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
-LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
- bpf_map_clear_priv_t clear_priv);
-LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated")
-LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__type() instead")
-LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
/**
* @brief **bpf_map__is_internal()** tells the caller whether or not the
@@ -824,63 +829,108 @@ LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
/**
- * @brief **libbpf_get_error()** extracts the error code from the passed
- * pointer
- * @param ptr pointer returned from libbpf API function
- * @return error code; or 0 if no error occured
- *
- * Many libbpf API functions which return pointers have logic to encode error
- * codes as pointers, and do not return NULL. Meaning **libbpf_get_error()**
- * should be used on the return value from these functions immediately after
- * calling the API function, with no intervening calls that could clobber the
- * `errno` variable. Consult the individual functions documentation to verify
- * if this logic applies should be used.
+ * @brief **bpf_map__lookup_elem()** allows the caller to look up the BPF map
+ * value corresponding to a provided key.
+ * @param map BPF map to lookup element in
+ * @param key pointer to memory containing bytes of the key used for lookup
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param value pointer to memory in which looked up value will be stored
+ * @param value_sz size in bytes of value data memory; it has to match BPF map
+ * definition's **value_size**. For per-CPU BPF maps value size has to be
+ * a product of BPF map value size and number of possible CPUs in the system
+ * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
+ * per-CPU values value size has to be aligned up to closest 8 bytes for
+ * alignment reasons, so expected size is: `round_up(value_size, 8)
+ * * libbpf_num_possible_cpus()`.
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
*
- * For these API functions, if `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)`
- * is enabled, NULL is returned on error instead.
- *
- * If ptr is NULL, then errno should be already set by the failing
- * API, because libbpf never returns NULL on success and it now always
- * sets errno on error.
- *
- * Example usage:
- *
- * struct perf_buffer *pb;
+ * **bpf_map__lookup_elem()** is the high-level equivalent of the
+ * **bpf_map_lookup_elem()** API with added checks for key and value sizes.
+ */
+LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags);
+
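To make the per-CPU sizing rule concrete, a sketch for a hypothetical per-CPU map of __u64 counters (8 bytes is already 8-byte aligned, so no extra rounding is needed):

#include <errno.h>
#include <stdlib.h>
#include <bpf/libbpf.h>

static int sum_percpu_counter(const struct bpf_map *map, __u32 key, __u64 *sum)
{
	int i, err, ncpus = libbpf_num_possible_cpus();
	__u64 *vals;

	if (ncpus < 0)
		return ncpus;
	vals = calloc(ncpus, sizeof(*vals));
	if (!vals)
		return -ENOMEM;
	err = bpf_map__lookup_elem(map, &key, sizeof(key),
				   vals, ncpus * sizeof(*vals), 0);
	for (*sum = 0, i = 0; !err && i < ncpus; i++)
		*sum += vals[i];
	free(vals);
	return err;
}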
+/**
+ * @brief **bpf_map__update_elem()** allows the caller to insert or update a
+ * value in the BPF map that corresponds to a provided key.
+ * @param map BPF map to insert to or update element in
+ * @param key pointer to memory containing bytes of the key
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param value pointer to memory containing bytes of the value
+ * @param value_sz size in bytes of value data memory; it has to match BPF map
+ * definition's **value_size**. For per-CPU BPF maps value size has to be
+ * a product of BPF map value size and number of possible CPUs in the system
+ * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
+ * per-CPU values value size has to be aligned up to closest 8 bytes for
+ * alignment reasons, so expected size is: `round_up(value_size, 8)
+ * * libbpf_num_possible_cpus()`.
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
*
- * pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES, &opts);
- * err = libbpf_get_error(pb);
- * if (err) {
- * pb = NULL;
- * fprintf(stderr, "failed to open perf buffer: %d\n", err);
- * goto cleanup;
- * }
+ * **bpf_map__update_elem()** is the high-level equivalent of the
+ * **bpf_map_update_elem()** API with added checks for key and value sizes.
*/
-LIBBPF_API long libbpf_get_error(const void *ptr);
+LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ const void *value, size_t value_sz, __u64 flags);
-struct bpf_prog_load_attr {
- const char *file;
- enum bpf_prog_type prog_type;
- enum bpf_attach_type expected_attach_type;
- int ifindex;
- int log_level;
- int prog_flags;
-};
+/**
+ * @brief **bpf_map__delete_elem()** allows the caller to delete the element
+ * in the BPF map that corresponds to a provided key.
+ * @param map BPF map to delete element from
+ * @param key pointer to memory containing bytes of the key
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
+ *
+ * **bpf_map__delete_elem()** is the high-level equivalent of the
+ * **bpf_map_delete_elem()** API with an added check for key size.
+ */
+LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz, __u64 flags);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open() and bpf_object__load() instead")
-LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
- struct bpf_object **pobj, int *prog_fd);
-LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead")
-LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd);
+/**
+ * @brief **bpf_map__lookup_and_delete_elem()** allows the caller to look up
+ * the BPF map value corresponding to a provided key and atomically delete it.
+ * @param map BPF map to lookup element in
+ * @param key pointer to memory containing bytes of the key used for lookup
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param value pointer to memory in which looked up value will be stored
+ * @param value_sz size in bytes of value data memory; it has to match BPF map
+ * definition's **value_size**. For per-CPU BPF maps value size has to be
+ * a product of BPF map value size and number of possible CPUs in the system
+ * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
+ * per-CPU values value size has to be aligned up to closest 8 bytes for
+ * alignment reasons, so expected size is: `round_up(value_size, 8)
+ * * libbpf_num_possible_cpus()`.
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
+ *
+ * **bpf_map__lookup_and_delete_elem()** is the high-level equivalent of the
+ * **bpf_map_lookup_and_delete_elem()** API with added checks for key and
+ * value sizes.
+ */
+LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags);
-/* XDP related API */
-struct xdp_link_info {
- __u32 prog_id;
- __u32 drv_prog_id;
- __u32 hw_prog_id;
- __u32 skb_prog_id;
- __u8 attach_mode;
-};
+/**
+ * @brief **bpf_map__get_next_key()** allows the caller to iterate BPF map
+ * keys by fetching the next key that follows the current key.
+ * @param map BPF map to fetch next key from
+ * @param cur_key pointer to memory containing bytes of current key or NULL to
+ * fetch the first key
+ * @param next_key pointer to memory to write next key into
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @return 0, on success; -ENOENT if **cur_key** is the last key in BPF map;
+ * negative error, otherwise
+ *
+ * **bpf_map__get_next_key()** is the high-level equivalent of the
+ * **bpf_map_get_next_key()** API with an added check for key size.
+ */
+LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
+ const void *cur_key, void *next_key, size_t key_sz);
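A sketch of a whole-map key walk for a map with __u32 keys (an assumption):

#include <errno.h>
#include <bpf/libbpf.h>

static long count_keys(const struct bpf_map *map)
{
	__u32 key, next;
	const __u32 *cur = NULL; /* NULL fetches the very first key */
	long cnt = 0;
	int err;

	while (!(err = bpf_map__get_next_key(map, cur, &next, sizeof(next)))) {
		key = next;
		cur = &key;
		cnt++;
	}
	return err == -ENOENT ? cnt : err; /* -ENOENT marks the end */
}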
struct bpf_xdp_set_link_opts {
size_t sz;
@@ -889,17 +939,6 @@ struct bpf_xdp_set_link_opts {
};
#define bpf_xdp_set_link_opts__last_field old_fd
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead")
-LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead")
-LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
- const struct bpf_xdp_set_link_opts *opts);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query_id() instead")
-LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query() instead")
-LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
- size_t info_size, __u32 flags);
-
struct bpf_xdp_attach_opts {
size_t sz;
int old_prog_fd;
@@ -998,17 +1037,7 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
/* common use perf buffer options */
struct perf_buffer_opts {
- union {
- size_t sz;
- struct { /* DEPRECATED: will be removed in v1.0 */
- /* if specified, sample_cb is called for each sample */
- perf_buffer_sample_fn sample_cb;
- /* if specified, lost_cb is called for each batch of lost samples */
- perf_buffer_lost_fn lost_cb;
- /* ctx is provided to sample_cb and lost_cb */
- void *ctx;
- };
- };
+ size_t sz;
};
#define perf_buffer_opts__last_field sz
@@ -1029,21 +1058,6 @@ perf_buffer__new(int map_fd, size_t page_cnt,
perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
const struct perf_buffer_opts *opts);
-LIBBPF_API struct perf_buffer *
-perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
- perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
- const struct perf_buffer_opts *opts);
-
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() instead")
-struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
- const struct perf_buffer_opts *opts);
-
-#define perf_buffer__new(...) ___libbpf_overload(___perf_buffer_new, __VA_ARGS__)
-#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \
- perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts)
-#define ___perf_buffer_new3(map_fd, page_cnt, opts) \
- perf_buffer__new_deprecated(map_fd, page_cnt, opts)
-
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
LIBBPF_PERF_EVENT_ERROR = -1,
@@ -1057,21 +1071,9 @@ typedef enum bpf_perf_event_ret
/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
- union {
- struct {
- size_t sz;
- long :0;
- long :0;
- };
- struct { /* DEPRECATED: will be removed in v1.0 */
- /* perf event attrs passed directly into perf_event_open() */
- struct perf_event_attr *attr;
- /* raw event callback */
- perf_buffer_event_fn event_cb;
- /* ctx is provided to event_cb */
- void *ctx;
- };
- };
+ size_t sz;
+ long :0;
+ long :0;
/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
* max_entries of given PERF_EVENT_ARRAY map)
*/
@@ -1083,26 +1085,13 @@ struct perf_buffer_raw_opts {
};
#define perf_buffer_raw_opts__last_field map_keys
+struct perf_event_attr;
+
LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
perf_buffer_event_fn event_cb, void *ctx,
const struct perf_buffer_raw_opts *opts);
-LIBBPF_API struct perf_buffer *
-perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
- perf_buffer_event_fn event_cb, void *ctx,
- const struct perf_buffer_raw_opts *opts);
-
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead")
-struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
- const struct perf_buffer_raw_opts *opts);
-
-#define perf_buffer__new_raw(...) ___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__)
-#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \
- perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts)
-#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \
- perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts)
-
LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
@@ -1110,15 +1099,22 @@ LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);
-
-typedef enum bpf_perf_event_ret
- (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
- void *private_data);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use perf_buffer__poll() or perf_buffer__consume() instead")
-LIBBPF_API enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
- void **copy_mem, size_t *copy_size,
- bpf_perf_event_print_t fn, void *private_data);
+/**
+ * @brief **perf_buffer__buffer()** returns the per-cpu raw mmap()'ed underlying
+ * memory region of the ring buffer.
+ * This ring buffer can be used to implement a custom events consumer.
+ * The ring buffer starts with the *struct perf_event_mmap_page*, which
+ * holds the ring buffer management fields. When accessing the header
+ * structure it's important to be SMP aware.
+ * You can refer to *perf_event_read_simple* for a simple example.
+ * @param pb the perf buffer structure
+ * @param buf_idx the buffer index to retrieve
+ * @param buf (out) gets the base pointer of the mmap()'ed memory
+ * @param buf_size (out) gets the size of the mmap()'ed region
+ * @return 0 on success, negative error code for failure
+ */
+LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
+ size_t *buf_size);
struct bpf_prog_linfo;
struct bpf_prog_info;
@@ -1141,14 +1137,6 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
* user, causing subsequent probes to fail. In this case, the caller may want
* to adjust that limit with setrlimit().
*/
-LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_prog_type() instead")
-LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_map_type() instead")
-LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
-LIBBPF_DEPRECATED_SINCE(0, 8, "use libbpf_probe_bpf_helper() instead")
-LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, __u32 ifindex);
-LIBBPF_DEPRECATED_SINCE(0, 8, "implement your own or use bpftool for feature detection")
-LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);
/**
* @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
@@ -1192,72 +1180,6 @@ LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void
LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
enum bpf_func_id helper_id, const void *opts);
-/*
- * Get bpf_prog_info in continuous memory
- *
- * struct bpf_prog_info has multiple arrays. The user has option to choose
- * arrays to fetch from kernel. The following APIs provide an uniform way to
- * fetch these data. All arrays in bpf_prog_info are stored in a single
- * continuous memory region. This makes it easy to store the info in a
- * file.
- *
- * Before writing bpf_prog_info_linear to files, it is necessary to
- * translate pointers in bpf_prog_info to offsets. Helper functions
- * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
- * are introduced to switch between pointers and offsets.
- *
- * Examples:
- * # To fetch map_ids and prog_tags:
- * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
- * (1UL << BPF_PROG_INFO_PROG_TAGS);
- * struct bpf_prog_info_linear *info_linear =
- * bpf_program__get_prog_info_linear(fd, arrays);
- *
- * # To save data in file
- * bpf_program__bpil_addr_to_offs(info_linear);
- * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
- *
- * # To read data from file
- * read(f, info_linear, <proper_size>);
- * bpf_program__bpil_offs_to_addr(info_linear);
- */
-enum bpf_prog_info_array {
- BPF_PROG_INFO_FIRST_ARRAY = 0,
- BPF_PROG_INFO_JITED_INSNS = 0,
- BPF_PROG_INFO_XLATED_INSNS,
- BPF_PROG_INFO_MAP_IDS,
- BPF_PROG_INFO_JITED_KSYMS,
- BPF_PROG_INFO_JITED_FUNC_LENS,
- BPF_PROG_INFO_FUNC_INFO,
- BPF_PROG_INFO_LINE_INFO,
- BPF_PROG_INFO_JITED_LINE_INFO,
- BPF_PROG_INFO_PROG_TAGS,
- BPF_PROG_INFO_LAST_ARRAY,
-};
-
-struct bpf_prog_info_linear {
- /* size of struct bpf_prog_info, when the tool is compiled */
- __u32 info_len;
- /* total bytes allocated for data, round up to 8 bytes */
- __u32 data_len;
- /* which arrays are included in data */
- __u64 arrays;
- struct bpf_prog_info info;
- __u8 data[];
-};
-
-LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
-LIBBPF_API struct bpf_prog_info_linear *
-bpf_program__get_prog_info_linear(int fd, __u64 arrays);
-
-LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
-LIBBPF_API void
-bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
-
-LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
-LIBBPF_API void
-bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
-
/**
* @brief **libbpf_num_possible_cpus()** is a helper function to get the
* number of possible CPUs that the host kernel supports and expects.
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index dd35ee58bfaa..119e6e1ea7f1 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -1,29 +1,14 @@
LIBBPF_0.0.1 {
global:
bpf_btf_get_fd_by_id;
- bpf_create_map;
- bpf_create_map_in_map;
- bpf_create_map_in_map_node;
- bpf_create_map_name;
- bpf_create_map_node;
- bpf_create_map_xattr;
- bpf_load_btf;
- bpf_load_program;
- bpf_load_program_xattr;
bpf_map__btf_key_type_id;
bpf_map__btf_value_type_id;
- bpf_map__def;
bpf_map__fd;
- bpf_map__is_offload_neutral;
bpf_map__name;
- bpf_map__next;
bpf_map__pin;
- bpf_map__prev;
- bpf_map__priv;
bpf_map__reuse_fd;
bpf_map__set_ifindex;
bpf_map__set_inner_map_fd;
- bpf_map__set_priv;
bpf_map__unpin;
bpf_map_delete_elem;
bpf_map_get_fd_by_id;
@@ -38,79 +23,37 @@ LIBBPF_0.0.1 {
bpf_object__btf_fd;
bpf_object__close;
bpf_object__find_map_by_name;
- bpf_object__find_map_by_offset;
- bpf_object__find_program_by_title;
bpf_object__kversion;
bpf_object__load;
bpf_object__name;
- bpf_object__next;
bpf_object__open;
- bpf_object__open_buffer;
- bpf_object__open_xattr;
bpf_object__pin;
bpf_object__pin_maps;
bpf_object__pin_programs;
- bpf_object__priv;
- bpf_object__set_priv;
- bpf_object__unload;
bpf_object__unpin_maps;
bpf_object__unpin_programs;
- bpf_perf_event_read_simple;
bpf_prog_attach;
bpf_prog_detach;
bpf_prog_detach2;
bpf_prog_get_fd_by_id;
bpf_prog_get_next_id;
- bpf_prog_load;
- bpf_prog_load_xattr;
bpf_prog_query;
- bpf_prog_test_run;
- bpf_prog_test_run_xattr;
bpf_program__fd;
- bpf_program__is_kprobe;
- bpf_program__is_perf_event;
- bpf_program__is_raw_tracepoint;
- bpf_program__is_sched_act;
- bpf_program__is_sched_cls;
- bpf_program__is_socket_filter;
- bpf_program__is_tracepoint;
- bpf_program__is_xdp;
- bpf_program__load;
- bpf_program__next;
- bpf_program__nth_fd;
bpf_program__pin;
- bpf_program__pin_instance;
- bpf_program__prev;
- bpf_program__priv;
bpf_program__set_expected_attach_type;
bpf_program__set_ifindex;
- bpf_program__set_kprobe;
- bpf_program__set_perf_event;
- bpf_program__set_prep;
- bpf_program__set_priv;
- bpf_program__set_raw_tracepoint;
- bpf_program__set_sched_act;
- bpf_program__set_sched_cls;
- bpf_program__set_socket_filter;
- bpf_program__set_tracepoint;
bpf_program__set_type;
- bpf_program__set_xdp;
- bpf_program__title;
bpf_program__unload;
bpf_program__unpin;
- bpf_program__unpin_instance;
bpf_prog_linfo__free;
bpf_prog_linfo__new;
bpf_prog_linfo__lfind_addr_func;
bpf_prog_linfo__lfind;
bpf_raw_tracepoint_open;
- bpf_set_link_xdp_fd;
bpf_task_fd_query;
- bpf_verify_program;
btf__fd;
btf__find_by_name;
btf__free;
- btf__get_from_id;
btf__name_by_offset;
btf__new;
btf__resolve_size;
@@ -127,48 +70,24 @@ LIBBPF_0.0.1 {
LIBBPF_0.0.2 {
global:
- bpf_probe_helper;
- bpf_probe_map_type;
- bpf_probe_prog_type;
- bpf_map__resize;
bpf_map_lookup_elem_flags;
bpf_object__btf;
bpf_object__find_map_fd_by_name;
- bpf_get_link_xdp_id;
- btf__dedup;
- btf__get_map_kv_tids;
- btf__get_nr_types;
btf__get_raw_data;
- btf__load;
btf_ext__free;
- btf_ext__func_info_rec_size;
btf_ext__get_raw_data;
- btf_ext__line_info_rec_size;
btf_ext__new;
- btf_ext__reloc_func_info;
- btf_ext__reloc_line_info;
- xsk_umem__create;
- xsk_socket__create;
- xsk_umem__delete;
- xsk_socket__delete;
- xsk_umem__fd;
- xsk_socket__fd;
- bpf_program__get_prog_info_linear;
- bpf_program__bpil_addr_to_offs;
- bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;
LIBBPF_0.0.3 {
global:
bpf_map__is_internal;
bpf_map_freeze;
- btf__finalize_data;
} LIBBPF_0.0.2;
LIBBPF_0.0.4 {
global:
bpf_link__destroy;
- bpf_object__load_xattr;
bpf_program__attach_kprobe;
bpf_program__attach_perf_event;
bpf_program__attach_raw_tracepoint;
@@ -176,14 +95,10 @@ LIBBPF_0.0.4 {
bpf_program__attach_uprobe;
btf_dump__dump_type;
btf_dump__free;
- btf_dump__new;
btf__parse_elf;
libbpf_num_possible_cpus;
perf_buffer__free;
- perf_buffer__new;
- perf_buffer__new_raw;
perf_buffer__poll;
- xsk_umem__create;
} LIBBPF_0.0.3;
LIBBPF_0.0.5 {
@@ -193,7 +108,6 @@ LIBBPF_0.0.5 {
LIBBPF_0.0.6 {
global:
- bpf_get_link_xdp_info;
bpf_map__get_pin_path;
bpf_map__is_pinned;
bpf_map__set_pin_path;
@@ -202,9 +116,6 @@ LIBBPF_0.0.6 {
bpf_program__attach_trace;
bpf_program__get_expected_attach_type;
bpf_program__get_type;
- bpf_program__is_tracing;
- bpf_program__set_tracing;
- bpf_program__size;
btf__find_by_name_kind;
libbpf_find_vmlinux_btf_id;
} LIBBPF_0.0.5;
@@ -224,14 +135,8 @@ LIBBPF_0.0.7 {
bpf_object__detach_skeleton;
bpf_object__load_skeleton;
bpf_object__open_skeleton;
- bpf_probe_large_insn_limit;
- bpf_prog_attach_xattr;
bpf_program__attach;
bpf_program__name;
- bpf_program__is_extension;
- bpf_program__is_struct_ops;
- bpf_program__set_extension;
- bpf_program__set_struct_ops;
btf__align_of;
libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
@@ -250,10 +155,7 @@ LIBBPF_0.0.8 {
bpf_prog_attach_opts;
bpf_program__attach_cgroup;
bpf_program__attach_lsm;
- bpf_program__is_lsm;
bpf_program__set_attach_target;
- bpf_program__set_lsm;
- bpf_set_link_xdp_fd_opts;
} LIBBPF_0.0.7;
LIBBPF_0.0.9 {
@@ -291,9 +193,7 @@ LIBBPF_0.1.0 {
bpf_map__value_size;
bpf_program__attach_xdp;
bpf_program__autoload;
- bpf_program__is_sk_lookup;
bpf_program__set_autoload;
- bpf_program__set_sk_lookup;
btf__parse;
btf__parse_raw;
btf__pointer_size;
@@ -336,7 +236,6 @@ LIBBPF_0.2.0 {
perf_buffer__buffer_fd;
perf_buffer__epoll_fd;
perf_buffer__consume_buffer;
- xsk_socket__create_shared;
} LIBBPF_0.1.0;
LIBBPF_0.3.0 {
@@ -348,8 +247,6 @@ LIBBPF_0.3.0 {
btf__new_empty_split;
btf__new_split;
ring_buffer__epoll_fd;
- xsk_setup_xdp_prog;
- xsk_socket__update_xskmap;
} LIBBPF_0.2.0;
LIBBPF_0.4.0 {
@@ -397,7 +294,6 @@ LIBBPF_0.6.0 {
bpf_object__next_program;
bpf_object__prev_map;
bpf_object__prev_program;
- bpf_prog_load_deprecated;
bpf_prog_load;
bpf_program__flags;
bpf_program__insn_cnt;
@@ -407,18 +303,14 @@ LIBBPF_0.6.0 {
btf__add_decl_tag;
btf__add_type_tag;
btf__dedup;
- btf__dedup_deprecated;
btf__raw_data;
btf__type_cnt;
btf_dump__new;
- btf_dump__new_deprecated;
libbpf_major_version;
libbpf_minor_version;
libbpf_version_string;
perf_buffer__new;
- perf_buffer__new_deprecated;
perf_buffer__new_raw;
- perf_buffer__new_raw_deprecated;
} LIBBPF_0.5.0;
LIBBPF_0.7.0 {
@@ -434,17 +326,43 @@ LIBBPF_0.7.0 {
bpf_xdp_detach;
bpf_xdp_query;
bpf_xdp_query_id;
+ btf_ext__raw_data;
libbpf_probe_bpf_helper;
libbpf_probe_bpf_map_type;
libbpf_probe_bpf_prog_type;
- libbpf_set_memlock_rlim_max;
+ libbpf_set_memlock_rlim;
} LIBBPF_0.6.0;
LIBBPF_0.8.0 {
global:
+ bpf_map__autocreate;
+ bpf_map__get_next_key;
+ bpf_map__delete_elem;
+ bpf_map__lookup_and_delete_elem;
+ bpf_map__lookup_elem;
+ bpf_map__set_autocreate;
+ bpf_map__update_elem;
+ bpf_map_delete_elem_flags;
bpf_object__destroy_subskeleton;
bpf_object__open_subskeleton;
+ bpf_program__attach_kprobe_multi_opts;
+ bpf_program__attach_trace_opts;
+ bpf_program__attach_usdt;
+ bpf_program__set_insns;
libbpf_register_prog_handler;
libbpf_unregister_prog_handler;
- bpf_program__attach_kprobe_multi_opts;
} LIBBPF_0.7.0;
+
+LIBBPF_1.0.0 {
+ global:
+ bpf_obj_get_opts;
+ bpf_prog_query_opts;
+ bpf_program__attach_ksyscall;
+ btf__add_enum64;
+ btf__add_enum64_value;
+ libbpf_bpf_attach_type_str;
+ libbpf_bpf_link_type_str;
+ libbpf_bpf_map_type_str;
+ libbpf_bpf_prog_type_str;
+ perf_buffer__buffer;
+};
diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h
index 000e37798ff2..9a7937f339df 100644
--- a/tools/lib/bpf/libbpf_common.h
+++ b/tools/lib/bpf/libbpf_common.h
@@ -30,20 +30,10 @@
/* Add checks for other versions below when planning deprecation of API symbols
* with the LIBBPF_DEPRECATED_SINCE macro.
*/
-#if __LIBBPF_CURRENT_VERSION_GEQ(0, 6)
-#define __LIBBPF_MARK_DEPRECATED_0_6(X) X
+#if __LIBBPF_CURRENT_VERSION_GEQ(1, 0)
+#define __LIBBPF_MARK_DEPRECATED_1_0(X) X
#else
-#define __LIBBPF_MARK_DEPRECATED_0_6(X)
-#endif
-#if __LIBBPF_CURRENT_VERSION_GEQ(0, 7)
-#define __LIBBPF_MARK_DEPRECATED_0_7(X) X
-#else
-#define __LIBBPF_MARK_DEPRECATED_0_7(X)
-#endif
-#if __LIBBPF_CURRENT_VERSION_GEQ(0, 8)
-#define __LIBBPF_MARK_DEPRECATED_0_8(X) X
-#else
-#define __LIBBPF_MARK_DEPRECATED_0_8(X)
+#define __LIBBPF_MARK_DEPRECATED_1_0(X)
#endif
/* This set of internal macros allows to do "function overloading" based on
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index b6247dc7f8eb..4135ae0a2bc3 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
-#include "libbpf_legacy.h"
#include "relo_core.h"
/* make sure libbpf doesn't use kernel-only integer typedefs */
@@ -103,6 +102,17 @@
#define str_has_pfx(str, pfx) \
(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
+/* suffix check */
+static inline bool str_has_sfx(const char *str, const char *sfx)
+{
+ size_t str_len = strlen(str);
+ size_t sfx_len = strlen(sfx);
+
+ if (sfx_len > str_len)
+ return false;
+ return strcmp(str + str_len - sfx_len, sfx) == 0;
+}
+
/* Symbol versioning is different between static and shared library.
* Properly versioned symbols are needed for shared library, but
* only the symbol of the new version is needed for static library.
@@ -148,6 +158,15 @@ do { \
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
+
+struct bpf_link {
+ int (*detach)(struct bpf_link *link);
+ void (*dealloc)(struct bpf_link *link);
+ char *pin_path; /* NULL, if not pinned */
+ int fd; /* hook FD, -1 if not applicable */
+ bool disconnected;
+};
+
/*
* Re-implement glibc's reallocarray() for libbpf internal-only use.
* reallocarray(), unfortunately, is not available in all versions of glibc,
@@ -329,6 +348,12 @@ enum kern_feature_id {
FEAT_BTF_TYPE_TAG,
/* memcg-based accounting for BPF maps and progs */
FEAT_MEMCG_ACCOUNT,
+ /* BPF cookie (bpf_get_attach_cookie() BPF helper) support */
+ FEAT_BPF_COOKIE,
+ /* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */
+ FEAT_BTF_ENUM64,
+ /* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
+ FEAT_SYSCALL_WRAPPER,
__FEAT_CNT,
};
@@ -354,6 +379,13 @@ struct btf_ext_info {
void *info;
__u32 rec_size;
__u32 len;
+ /* optional (maintained internally by libbpf) mapping between .BTF.ext
+ * section and corresponding ELF section. This is used to join
+ * information like CO-RE relocation records with corresponding BPF
+ * programs defined in ELF sections
+ */
+ __u32 *sec_idxs;
+ int sec_cnt;
};
#define for_each_btf_ext_sec(seg, sec) \
@@ -447,8 +479,6 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
__u32 kind);
-extern enum libbpf_strict_mode libbpf_mode;
-
typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
const char *sym_name, void *ctx);
@@ -467,12 +497,8 @@ static inline int libbpf_err(int ret)
*/
static inline int libbpf_err_errno(int ret)
{
- if (libbpf_mode & LIBBPF_STRICT_DIRECT_ERRS)
- /* errno is already assumed to be set on error */
- return ret < 0 ? -errno : ret;
-
- /* legacy: on error return -1 directly and don't touch errno */
- return ret;
+ /* errno is already assumed to be set on error */
+ return ret < 0 ? -errno : ret;
}
/* handle error for pointer-returning APIs, err is assumed to be < 0 always */
@@ -480,12 +506,7 @@ static inline void *libbpf_err_ptr(int err)
{
/* set errno on error, this doesn't break anything */
errno = -err;
-
- if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
- return NULL;
-
- /* legacy: encode err as ptr */
- return ERR_PTR(err);
+ return NULL;
}
/* handle pointer-returning APIs' error handling */
@@ -495,11 +516,7 @@ static inline void *libbpf_ptr(void *ret)
if (IS_ERR(ret))
errno = -PTR_ERR(ret);
- if (libbpf_mode & LIBBPF_STRICT_CLEAN_PTRS)
- return IS_ERR(ret) ? NULL : ret;
-
- /* legacy: pass-through original pointer */
- return ret;
+ return IS_ERR(ret) ? NULL : ret;
}
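
Taken together, these helpers implement libbpf 1.0's single error convention. A minimal caller-side sketch (assuming <bpf/libbpf.h>, <bpf/bpf.h>, <errno.h>, and <stdio.h> are included):

    struct btf *btf = btf__load_vmlinux_btf();
    if (!btf)               /* pointer-returning API: NULL, error code in errno */
            fprintf(stderr, "can't load vmlinux BTF: %d\n", -errno);

    int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "m", 4, 4, 1, NULL);
    if (fd < 0)             /* int-returning API: negative error code */
            fprintf(stderr, "map creation failed: %d\n", fd);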
static inline bool str_is_empty(const char *s)
@@ -543,4 +560,17 @@ int bpf_core_add_cands(struct bpf_core_cand *local_cand,
struct bpf_core_cand_list *cands);
void bpf_core_free_cands(struct bpf_core_cand_list *cands);
+struct usdt_manager *usdt_manager_new(struct bpf_object *obj);
+void usdt_manager_free(struct usdt_manager *man);
+struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man,
+ const struct bpf_program *prog,
+ pid_t pid, const char *path,
+ const char *usdt_provider, const char *usdt_name,
+ __u64 usdt_cookie);
+
+static inline bool is_pow_of_2(size_t x)
+{
+ return x && (x & (x - 1)) == 0;
+}
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
index d7bcbd01f66f..5b7e0155db6a 100644
--- a/tools/lib/bpf/libbpf_legacy.h
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -20,6 +20,11 @@
extern "C" {
#endif
+/* As of libbpf 1.0 libbpf_set_strict_mode() and enum libbpf_strict_mode have
+ * no effect. But they are left in libbpf_legacy.h so that applications that
+ * prepared for libbpf 1.0 before final release by using
+ * libbpf_set_strict_mode() still work with libbpf 1.0+ without any changes.
+ */
enum libbpf_strict_mode {
/* Turn on all supported strict features of libbpf to simulate libbpf
* v1.0 behavior.
@@ -71,8 +76,8 @@ enum libbpf_strict_mode {
* first BPF program or map creation operation. This is done only if
* kernel is too old to support memcg-based memory accounting for BPF
* subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
- * but it can be overriden with libbpf_set_memlock_rlim_max() API.
- * Note that libbpf_set_memlock_rlim_max() needs to be called before
+ * but it can be overridden with libbpf_set_memlock_rlim() API.
+ * Note that libbpf_set_memlock_rlim() needs to be called before
* the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
* operation.
*/
@@ -88,6 +93,25 @@ enum libbpf_strict_mode {
LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
+/**
+ * @brief **libbpf_get_error()** extracts the error code from the passed
+ * pointer
+ * @param ptr pointer returned from libbpf API function
+ * @return error code; or 0 if no error occurred
+ *
+ * Note, as of libbpf 1.0 this function is not necessary and its use is not
+ * recommended. Libbpf doesn't return error codes embedded into the pointer
+ * itself. Instead, NULL is returned on error and the error code is passed
+ * through the thread-local errno variable. **libbpf_get_error()** just returns
+ * -errno if it receives NULL, which is correct only if errno hasn't been
+ * modified between the libbpf API call and the corresponding
+ * **libbpf_get_error()** call. Prefer to check the return value for NULL and
+ * use errno directly.
+ *
+ * This API is left in libbpf 1.0 so that applications that were 1.0-ready
+ * before the final libbpf 1.0 release keep working without any changes.
+ */
+LIBBPF_API long libbpf_get_error(const void *ptr);
+
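
A before/after sketch of the migration this comment recommends (object path hypothetical):

    struct bpf_object *obj = bpf_object__open("prog.bpf.o");

    /* legacy style, still works in 1.0 but discouraged: */
    long err = libbpf_get_error(obj);   /* -errno if obj == NULL, else 0 */

    /* preferred 1.0 style: */
    if (!obj)
            err = -errno;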
#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS
/* "Discouraged" APIs which don't follow consistent libbpf naming patterns.
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 97b06cede56f..0b5398786bf3 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -17,47 +17,14 @@
#include "libbpf.h"
#include "libbpf_internal.h"
-static bool grep(const char *buffer, const char *pattern)
-{
- return !!strstr(buffer, pattern);
-}
-
-static int get_vendor_id(int ifindex)
-{
- char ifname[IF_NAMESIZE], path[64], buf[8];
- ssize_t len;
- int fd;
-
- if (!if_indextoname(ifindex, ifname))
- return -1;
-
- snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
-
- fd = open(path, O_RDONLY | O_CLOEXEC);
- if (fd < 0)
- return -1;
-
- len = read(fd, buf, sizeof(buf));
- close(fd);
- if (len < 0)
- return -1;
- if (len >= (ssize_t)sizeof(buf))
- return -1;
- buf[len] = '\0';
-
- return strtol(buf, NULL, 0);
-}
-
static int probe_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insns_cnt,
- char *log_buf, size_t log_buf_sz,
- __u32 ifindex)
+ char *log_buf, size_t log_buf_sz)
{
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_buf = log_buf,
.log_size = log_buf_sz,
.log_level = log_buf ? 1 : 0,
- .prog_ifindex = ifindex,
);
int fd, err, exp_err = 0;
const char *exp_msg = NULL;
@@ -161,31 +128,10 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
if (opts)
return libbpf_err(-EINVAL);
- ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0, 0);
+ ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0);
return libbpf_err(ret);
}
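
A usage sketch for the simplified probing API, as an application might use it for feature detection:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
            /* 1 = supported, 0 = not supported, <0 = probing error */
            int ret = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL);

            printf("XDP program type: %s\n",
                   ret == 1 ? "supported" : "not supported");
            return 0;
    }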
-bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
-{
- struct bpf_insn insns[2] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN()
- };
-
- /* prefer libbpf_probe_bpf_prog_type() unless offload is requested */
- if (ifindex == 0)
- return libbpf_probe_bpf_prog_type(prog_type, NULL) == 1;
-
- if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
- /* nfp returns -EINVAL on exit(0) with TC offload */
- insns[0].imm = 2;
-
- errno = 0;
- probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
-
- return errno != EINVAL && errno != EOPNOTSUPP;
-}
-
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len)
{
@@ -242,15 +188,13 @@ static int load_local_storage_btf(void)
strs, sizeof(strs));
}
-static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
+static int probe_map_create(enum bpf_map_type map_type)
{
LIBBPF_OPTS(bpf_map_create_opts, opts);
int key_size, value_size, max_entries;
__u32 btf_key_type_id = 0, btf_value_type_id = 0;
int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err;
- opts.map_ifindex = ifindex;
-
key_size = sizeof(__u32);
value_size = sizeof(__u32);
max_entries = 1;
@@ -326,12 +270,6 @@ static int probe_map_create(enum bpf_map_type map_type, __u32 ifindex)
if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
- /* TODO: probe for device, once libbpf has a function to create
- * map-in-map for offload
- */
- if (ifindex)
- goto cleanup;
-
fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
sizeof(__u32), sizeof(__u32), 1, NULL);
if (fd_inner < 0)
@@ -370,15 +308,10 @@ int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
if (opts)
return libbpf_err(-EINVAL);
- ret = probe_map_create(map_type, 0);
+ ret = probe_map_create(map_type);
return libbpf_err(ret);
}
-bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
-{
- return probe_map_create(map_type, ifindex) == 1;
-}
-
int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
const void *opts)
{
@@ -407,7 +340,7 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
}
buf[0] = '\0';
- ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf), 0);
+ ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf));
if (ret < 0)
return libbpf_err(ret);
@@ -427,51 +360,3 @@ int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helpe
return 0;
return 1; /* assume supported */
}
-
-bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
- __u32 ifindex)
-{
- struct bpf_insn insns[2] = {
- BPF_EMIT_CALL(id),
- BPF_EXIT_INSN()
- };
- char buf[4096] = {};
- bool res;
-
- probe_prog_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), ifindex);
- res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
-
- if (ifindex) {
- switch (get_vendor_id(ifindex)) {
- case 0x19ee: /* Netronome specific */
- res = res && !grep(buf, "not supported by FW") &&
- !grep(buf, "unsupported function id");
- break;
- default:
- break;
- }
- }
-
- return res;
-}
-
-/*
- * Probe for availability of kernel commit (5.3):
- *
- * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
- */
-bool bpf_probe_large_insn_limit(__u32 ifindex)
-{
- struct bpf_insn insns[BPF_MAXINSNS + 1];
- int i;
-
- for (i = 0; i < BPF_MAXINSNS; i++)
- insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
- insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
-
- errno = 0;
- probe_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
- ifindex);
-
- return errno != E2BIG && errno != EINVAL;
-}
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index 61f2039404b6..2fb2f4290080 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -3,7 +3,7 @@
#ifndef __LIBBPF_VERSION_H
#define __LIBBPF_VERSION_H
-#define LIBBPF_MAJOR_VERSION 0
-#define LIBBPF_MINOR_VERSION 8
+#define LIBBPF_MAJOR_VERSION 1
+#define LIBBPF_MINOR_VERSION 0
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 9aa016fb55aa..4ac02c28e152 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -697,11 +697,6 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
return err;
}
-static bool is_pow_of_2(size_t x)
-{
- return x && (x & (x - 1)) == 0;
-}
-
static int linker_sanity_check_elf(struct src_obj *obj)
{
struct src_sec *sec;
@@ -1340,6 +1335,7 @@ recur:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FWD:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
@@ -1362,6 +1358,7 @@ recur:
case BTF_KIND_INT:
case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
/* ignore encoding for int and enum values for enum */
if (t1->size != t2->size) {
pr_warn("global '%s': incompatible %s '%s' size %u and %u\n",
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index cbc8967d5402..6c013168032d 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -27,6 +27,14 @@ typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
void *cookie);
+struct xdp_link_info {
+ __u32 prog_id;
+ __u32 drv_prog_id;
+ __u32 hw_prog_id;
+ __u32 skb_prog_id;
+ __u8 attach_mode;
+};
+
struct xdp_id_md {
int ifindex;
__u32 flags;
@@ -288,31 +296,6 @@ int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *o
return bpf_xdp_attach(ifindex, -1, flags, opts);
}
-int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
- const struct bpf_xdp_set_link_opts *opts)
-{
- int old_fd = -1, ret;
-
- if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
- return libbpf_err(-EINVAL);
-
- if (OPTS_HAS(opts, old_fd)) {
- old_fd = OPTS_GET(opts, old_fd, -1);
- flags |= XDP_FLAGS_REPLACE;
- }
-
- ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags);
- return libbpf_err(ret);
-}
-
-int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
-{
- int ret;
-
- ret = __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
- return libbpf_err(ret);
-}
-
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
{
@@ -413,30 +396,6 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
return 0;
}
-int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
- size_t info_size, __u32 flags)
-{
- LIBBPF_OPTS(bpf_xdp_query_opts, opts);
- size_t sz;
- int err;
-
- if (!info_size)
- return libbpf_err(-EINVAL);
-
- err = bpf_xdp_query(ifindex, flags, &opts);
- if (err)
- return libbpf_err(err);
-
- /* struct xdp_link_info field layout matches struct bpf_xdp_query_opts
- * layout after sz field
- */
- sz = min(info_size, offsetofend(struct xdp_link_info, attach_mode));
- memcpy(info, &opts.prog_id, sz);
- memset((void *)info + sz, 0, info_size - sz);
-
- return 0;
-}
-
int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
{
LIBBPF_OPTS(bpf_xdp_query_opts, opts);
@@ -463,11 +422,6 @@ int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
}
-int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
-{
- return bpf_xdp_query_id(ifindex, flags, prog_id);
-}
-
typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);
static int clsact_config(struct libbpf_nla_req *req)
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index f946f23eab20..c4b0e81ae293 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -95,6 +95,7 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
case BPF_CORE_TYPE_EXISTS: return "type_exists";
+ case BPF_CORE_TYPE_MATCHES: return "type_matches";
case BPF_CORE_TYPE_SIZE: return "type_size";
case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
@@ -123,6 +124,7 @@ static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
case BPF_CORE_TYPE_ID_LOCAL:
case BPF_CORE_TYPE_ID_TARGET:
case BPF_CORE_TYPE_EXISTS:
+ case BPF_CORE_TYPE_MATCHES:
case BPF_CORE_TYPE_SIZE:
return true;
default:
@@ -141,6 +143,86 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
}
}
+int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id, int level)
+{
+ const struct btf_type *local_type, *targ_type;
+ int depth = 32; /* max recursion depth */
+
+ /* caller made sure that names match (ignoring flavor suffix) */
+ local_type = btf_type_by_id(local_btf, local_id);
+ targ_type = btf_type_by_id(targ_btf, targ_id);
+ if (!btf_kind_core_compat(local_type, targ_type))
+ return 0;
+
+recur:
+ depth--;
+ if (depth < 0)
+ return -EINVAL;
+
+ local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_type || !targ_type)
+ return -EINVAL;
+
+ if (!btf_kind_core_compat(local_type, targ_type))
+ return 0;
+
+ switch (btf_kind(local_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ case BTF_KIND_ENUM64:
+ return 1;
+ case BTF_KIND_INT:
+ /* just reject deprecated bitfield-like integers; all other
+ * integers are by default compatible between each other
+ */
+ return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
+ case BTF_KIND_PTR:
+ local_id = local_type->type;
+ targ_id = targ_type->type;
+ goto recur;
+ case BTF_KIND_ARRAY:
+ local_id = btf_array(local_type)->type;
+ targ_id = btf_array(targ_type)->type;
+ goto recur;
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *local_p = btf_params(local_type);
+ struct btf_param *targ_p = btf_params(targ_type);
+ __u16 local_vlen = btf_vlen(local_type);
+ __u16 targ_vlen = btf_vlen(targ_type);
+ int i, err;
+
+ if (local_vlen != targ_vlen)
+ return 0;
+
+ for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
+ if (level <= 0)
+ return -EINVAL;
+
+ skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
+ skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
+ err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
+ level - 1);
+ if (err <= 0)
+ return err;
+ }
+
+ /* tail recurse for return type check */
+ skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
+ skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
+ goto recur;
+ }
+ default:
+ pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
+ btf_kind_str(local_type), local_id, targ_id);
+ return 0;
+ }
+}
+
/*
* Turn bpf_core_relo into a low- and high-level spec representation,
* validating correctness along the way, as well as calculating resulting
@@ -167,40 +249,39 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
* just a parsed access string representation): [0, 1, 2, 3].
*
* High-level spec will capture only 3 points:
- * - intial zero-index access by pointer (&s->... is the same as &s[0]...);
+ * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
* - field 'a' access (corresponds to '2' in low-level spec);
* - array element #3 access (corresponds to '3' in low-level spec).
*
- * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
+ * Type-based relocations (TYPE_EXISTS/TYPE_MATCHES/TYPE_SIZE,
* TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
* spec and raw_spec are kept empty.
*
* Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
* string to specify enumerator's value index that need to be relocated.
*/
-static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
- __u32 type_id,
- const char *spec_str,
- enum bpf_core_relo_kind relo_kind,
- struct bpf_core_spec *spec)
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec)
{
int access_idx, parsed_len, i;
struct bpf_core_accessor *acc;
const struct btf_type *t;
- const char *name;
- __u32 id;
+ const char *name, *spec_str;
+ __u32 id, name_off;
__s64 sz;
+ spec_str = btf__name_by_offset(btf, relo->access_str_off);
if (str_is_empty(spec_str) || *spec_str == ':')
return -EINVAL;
memset(spec, 0, sizeof(*spec));
spec->btf = btf;
- spec->root_type_id = type_id;
- spec->relo_kind = relo_kind;
+ spec->root_type_id = relo->type_id;
+ spec->relo_kind = relo->kind;
/* type-based relocations don't have a field access string */
- if (core_relo_is_type_based(relo_kind)) {
+ if (core_relo_is_type_based(relo->kind)) {
if (strcmp(spec_str, "0"))
return -EINVAL;
return 0;
@@ -221,7 +302,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
if (spec->raw_len == 0)
return -EINVAL;
- t = skip_mods_and_typedefs(btf, type_id, &id);
+ t = skip_mods_and_typedefs(btf, relo->type_id, &id);
if (!t)
return -EINVAL;
@@ -231,16 +312,18 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
acc->idx = access_idx;
spec->len++;
- if (core_relo_is_enumval_based(relo_kind)) {
- if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
+ if (core_relo_is_enumval_based(relo->kind)) {
+ if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
return -EINVAL;
/* record enumerator name in a first accessor */
- acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
+ name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off
+ : btf_enum64(t)[access_idx].name_off;
+ acc->name = btf__name_by_offset(btf, name_off);
return 0;
}
- if (!core_relo_is_field_based(relo_kind))
+ if (!core_relo_is_field_based(relo->kind))
return -EINVAL;
sz = btf__resolve_size(btf, id);
@@ -301,7 +384,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
spec->bit_offset += access_idx * sz * 8;
} else {
pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
- prog_name, type_id, spec_str, i, id, btf_kind_str(t));
+ prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
return -EINVAL;
}
}
@@ -341,7 +424,7 @@ recur:
if (btf_is_composite(local_type) && btf_is_composite(targ_type))
return 1;
- if (btf_kind(local_type) != btf_kind(targ_type))
+ if (!btf_kind_core_compat(local_type, targ_type))
return 0;
switch (btf_kind(local_type)) {
@@ -349,6 +432,7 @@ recur:
case BTF_KIND_FLOAT:
return 1;
case BTF_KIND_FWD:
+ case BTF_KIND_ENUM64:
case BTF_KIND_ENUM: {
const char *local_name, *targ_name;
size_t local_len, targ_len;
@@ -478,6 +562,7 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
const struct bpf_core_accessor *local_acc;
struct bpf_core_accessor *targ_acc;
int i, sz, matched;
+ __u32 name_off;
memset(targ_spec, 0, sizeof(*targ_spec));
targ_spec->btf = targ_btf;
@@ -485,9 +570,14 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
targ_spec->relo_kind = local_spec->relo_kind;
if (core_relo_is_type_based(local_spec->relo_kind)) {
- return bpf_core_types_are_compat(local_spec->btf,
- local_spec->root_type_id,
- targ_btf, targ_id);
+ if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES)
+ return bpf_core_types_match(local_spec->btf,
+ local_spec->root_type_id,
+ targ_btf, targ_id);
+ else
+ return bpf_core_types_are_compat(local_spec->btf,
+ local_spec->root_type_id,
+ targ_btf, targ_id);
}
local_acc = &local_spec->spec[0];
@@ -495,18 +585,22 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
if (core_relo_is_enumval_based(local_spec->relo_kind)) {
size_t local_essent_len, targ_essent_len;
- const struct btf_enum *e;
const char *targ_name;
/* has to resolve to an enum */
targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
- if (!btf_is_enum(targ_type))
+ if (!btf_is_any_enum(targ_type))
return 0;
local_essent_len = bpf_core_essential_name_len(local_acc->name);
- for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
- targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
+ for (i = 0; i < btf_vlen(targ_type); i++) {
+ if (btf_is_enum(targ_type))
+ name_off = btf_enum(targ_type)[i].name_off;
+ else
+ name_off = btf_enum64(targ_type)[i].name_off;
+
+ targ_name = btf__name_by_offset(targ_spec->btf, name_off);
targ_essent_len = bpf_core_essential_name_len(targ_name);
if (targ_essent_len != local_essent_len)
continue;
@@ -584,7 +678,7 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
static int bpf_core_calc_field_relo(const char *prog_name,
const struct bpf_core_relo *relo,
const struct bpf_core_spec *spec,
- __u32 *val, __u32 *field_sz, __u32 *type_id,
+ __u64 *val, __u32 *field_sz, __u32 *type_id,
bool *validate)
{
const struct bpf_core_accessor *acc;
@@ -681,8 +775,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
*val = byte_sz;
break;
case BPF_CORE_FIELD_SIGNED:
- /* enums will be assumed unsigned */
- *val = btf_is_enum(mt) ||
+ *val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
(btf_int_encoding(mt) & BTF_INT_SIGNED);
if (validate)
*validate = true; /* signedness is never ambiguous */
@@ -709,7 +802,7 @@ static int bpf_core_calc_field_relo(const char *prog_name,
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
const struct bpf_core_spec *spec,
- __u32 *val, bool *validate)
+ __u64 *val, bool *validate)
{
__s64 sz;
@@ -733,6 +826,7 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
*validate = false;
break;
case BPF_CORE_TYPE_EXISTS:
+ case BPF_CORE_TYPE_MATCHES:
*val = 1;
break;
case BPF_CORE_TYPE_SIZE:
@@ -752,10 +846,9 @@ static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
const struct bpf_core_spec *spec,
- __u32 *val)
+ __u64 *val)
{
const struct btf_type *t;
- const struct btf_enum *e;
switch (relo->kind) {
case BPF_CORE_ENUMVAL_EXISTS:
@@ -765,8 +858,10 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
if (!spec)
return -EUCLEAN; /* request instruction poisoning */
t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
- e = btf_enum(t) + spec->spec[0].idx;
- *val = e->val;
+ if (btf_is_enum(t))
+ *val = btf_enum(t)[spec->spec[0].idx].val;
+ else
+ *val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx);
break;
default:
return -EOPNOTSUPP;
@@ -930,7 +1025,7 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
int insn_idx, const struct bpf_core_relo *relo,
int relo_idx, const struct bpf_core_relo_res *res)
{
- __u32 orig_val, new_val;
+ __u64 orig_val, new_val;
__u8 class;
class = BPF_CLASS(insn->code);
@@ -955,28 +1050,30 @@ poison:
if (BPF_SRC(insn->code) != BPF_K)
return -EINVAL;
if (res->validate && insn->imm != orig_val) {
- pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n",
prog_name, relo_idx,
- insn_idx, insn->imm, orig_val, new_val);
+ insn_idx, insn->imm, (unsigned long long)orig_val,
+ (unsigned long long)new_val);
return -EINVAL;
}
orig_val = insn->imm;
insn->imm = new_val;
- pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
+ pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n",
prog_name, relo_idx, insn_idx,
- orig_val, new_val);
+ (unsigned long long)orig_val, (unsigned long long)new_val);
break;
case BPF_LDX:
case BPF_ST:
case BPF_STX:
if (res->validate && insn->off != orig_val) {
- pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
- prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n",
+ prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val,
+ (unsigned long long)new_val);
return -EINVAL;
}
if (new_val > SHRT_MAX) {
- pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
- prog_name, relo_idx, insn_idx, new_val);
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n",
+ prog_name, relo_idx, insn_idx, (unsigned long long)new_val);
return -ERANGE;
}
if (res->fail_memsz_adjust) {
@@ -988,8 +1085,9 @@ poison:
orig_val = insn->off;
insn->off = new_val;
- pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
- prog_name, relo_idx, insn_idx, orig_val, new_val);
+ pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n",
+ prog_name, relo_idx, insn_idx, (unsigned long long)orig_val,
+ (unsigned long long)new_val);
if (res->new_sz != res->orig_sz) {
int insn_bytes_sz, insn_bpf_sz;
@@ -1025,20 +1123,20 @@ poison:
return -EINVAL;
}
- imm = insn[0].imm + ((__u64)insn[1].imm << 32);
+ imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
if (res->validate && imm != orig_val) {
- pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n",
prog_name, relo_idx,
insn_idx, (unsigned long long)imm,
- orig_val, new_val);
+ (unsigned long long)orig_val, (unsigned long long)new_val);
return -EINVAL;
}
insn[0].imm = new_val;
- insn[1].imm = 0; /* currently only 32-bit values are supported */
- pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
+ insn[1].imm = new_val >> 32;
+ pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n",
prog_name, relo_idx, insn_idx,
- (unsigned long long)imm, new_val);
+ (unsigned long long)imm, (unsigned long long)new_val);
break;
}
default:
@@ -1055,51 +1153,78 @@ poison:
* [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
* where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
*/
-static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
{
const struct btf_type *t;
- const struct btf_enum *e;
const char *s;
__u32 type_id;
- int i;
+ int i, len = 0;
+
+#define append_buf(fmt, args...) \
+ ({ \
+ int r; \
+ r = snprintf(buf, buf_sz, fmt, ##args); \
+ len += r; \
+ if (r >= buf_sz) \
+ r = buf_sz; \
+ buf += r; \
+ buf_sz -= r; \
+ })
type_id = spec->root_type_id;
t = btf_type_by_id(spec->btf, type_id);
s = btf__name_by_offset(spec->btf, t->name_off);
- libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
+ append_buf("<%s> [%u] %s %s",
+ core_relo_kind_str(spec->relo_kind),
+ type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
if (core_relo_is_type_based(spec->relo_kind))
- return;
+ return len;
if (core_relo_is_enumval_based(spec->relo_kind)) {
t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
- e = btf_enum(t) + spec->raw_spec[0];
- s = btf__name_by_offset(spec->btf, e->name_off);
+ if (btf_is_enum(t)) {
+ const struct btf_enum *e;
+ const char *fmt_str;
+
+ e = btf_enum(t) + spec->raw_spec[0];
+ s = btf__name_by_offset(spec->btf, e->name_off);
+ fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u";
+ append_buf(fmt_str, s, e->val);
+ } else {
+ const struct btf_enum64 *e;
+ const char *fmt_str;
- libbpf_print(level, "::%s = %u", s, e->val);
- return;
+ e = btf_enum64(t) + spec->raw_spec[0];
+ s = btf__name_by_offset(spec->btf, e->name_off);
+ fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu";
+ append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e));
+ }
+ return len;
}
if (core_relo_is_field_based(spec->relo_kind)) {
for (i = 0; i < spec->len; i++) {
if (spec->spec[i].name)
- libbpf_print(level, ".%s", spec->spec[i].name);
+ append_buf(".%s", spec->spec[i].name);
else if (i > 0 || spec->spec[i].idx > 0)
- libbpf_print(level, "[%u]", spec->spec[i].idx);
+ append_buf("[%u]", spec->spec[i].idx);
}
- libbpf_print(level, " (");
+ append_buf(" (");
for (i = 0; i < spec->raw_len; i++)
- libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
+ append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
if (spec->bit_offset % 8)
- libbpf_print(level, " @ offset %u.%u)",
- spec->bit_offset / 8, spec->bit_offset % 8);
+ append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
else
- libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
- return;
+ append_buf(" @ offset %u)", spec->bit_offset / 8);
+ return len;
}
+
+ return len;
+#undef append_buf
}
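
For reference, a sketch of how this formatter is consumed, and the shape of its output (the example string is illustrative):

    char spec_buf[256];

    bpf_core_format_spec(spec_buf, sizeof(spec_buf), spec);
    /* produces e.g. "<byte_off> [25] struct task_struct.comm (0:71 @ offset 2968)" */
    pr_debug("spec: %s\n", spec_buf);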
/*
@@ -1134,11 +1259,11 @@ static void bpf_core_dump_spec(const char *prog_name, int level, const struct bp
* 3. It is supported and expected that there might be multiple flavors
* matching the spec. As long as all the specs resolve to the same set of
* offsets across all candidates, there is no error. If there is any
- * ambiguity, CO-RE relocation will fail. This is necessary to accomodate
- * imprefection of BTF deduplication, which can cause slight duplication of
+ * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
+ * imperfection of BTF deduplication, which can cause slight duplication of
* the same BTF type, if some directly or indirectly referenced (by
* pointer) type gets resolved to different actual types in different
- * object files. If such situation occurs, deduplicated BTF will end up
+ * object files. If such a situation occurs, deduplicated BTF will end up
* with two (or more) structurally identical types, which differ only in
* types they refer to through pointer. This should be OK in most cases and
* is not an error.
@@ -1167,7 +1292,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
const struct btf_type *local_type;
const char *local_name;
__u32 local_id;
- const char *spec_str;
+ char spec_buf[256];
int i, j, err;
local_id = relo->type_id;
@@ -1176,24 +1301,20 @@ int bpf_core_calc_relo_insn(const char *prog_name,
if (!local_name)
return -EINVAL;
- spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
- if (str_is_empty(spec_str))
- return -EINVAL;
-
- err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str,
- relo->kind, local_spec);
+ err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
if (err) {
+ const char *spec_str;
+
+ spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
prog_name, relo_idx, local_id, btf_kind_str(local_type),
str_is_empty(local_name) ? "<anon>" : local_name,
- spec_str, err);
+ spec_str ?: "<?>", err);
return -EINVAL;
}
- pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
- relo_idx, core_relo_kind_str(relo->kind), relo->kind);
- bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec);
- libbpf_print(LIBBPF_DEBUG, "\n");
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
+ pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);
/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
@@ -1207,7 +1328,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
}
/* libbpf doesn't support candidate search for anonymous types */
- if (str_is_empty(spec_str)) {
+ if (str_is_empty(local_name)) {
pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
return -EOPNOTSUPP;
@@ -1217,17 +1338,15 @@ int bpf_core_calc_relo_insn(const char *prog_name,
err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
cands->cands[i].id, cand_spec);
if (err < 0) {
- pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
- prog_name, relo_idx, i);
- bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec);
- libbpf_print(LIBBPF_WARN, ": %d\n", err);
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
+ prog_name, relo_idx, i, spec_buf, err);
return err;
}
- pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
- relo_idx, err == 0 ? "non-matching" : "matching", i);
- bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec);
- libbpf_print(LIBBPF_DEBUG, "\n");
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
+ relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);
if (err == 0)
continue;
@@ -1253,10 +1372,12 @@ int bpf_core_calc_relo_insn(const char *prog_name,
* decision and value, otherwise it's dangerous to
* proceed due to ambiguity
*/
- pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
+ pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n",
prog_name, relo_idx,
- cand_res.poison ? "failure" : "success", cand_res.new_val,
- targ_res->poison ? "failure" : "success", targ_res->new_val);
+ cand_res.poison ? "failure" : "success",
+ (unsigned long long)cand_res.new_val,
+ targ_res->poison ? "failure" : "success",
+ (unsigned long long)targ_res->new_val);
return -EINVAL;
}
@@ -1297,3 +1418,273 @@ int bpf_core_calc_relo_insn(const char *prog_name,
return 0;
}
+
+static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off,
+ const struct btf *targ_btf, size_t targ_name_off)
+{
+ const char *local_n, *targ_n;
+ size_t local_len, targ_len;
+
+ local_n = btf__name_by_offset(local_btf, local_name_off);
+ targ_n = btf__name_by_offset(targ_btf, targ_name_off);
+
+ if (str_is_empty(targ_n))
+ return str_is_empty(local_n);
+
+ targ_len = bpf_core_essential_name_len(targ_n);
+ local_len = bpf_core_essential_name_len(local_n);
+
+ return targ_len == local_len && strncmp(local_n, targ_n, local_len) == 0;
+}
+
+static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t,
+ const struct btf *targ_btf, const struct btf_type *targ_t)
+{
+ __u16 local_vlen = btf_vlen(local_t);
+ __u16 targ_vlen = btf_vlen(targ_t);
+ int i, j;
+
+ if (local_t->size != targ_t->size)
+ return 0;
+
+ if (local_vlen > targ_vlen)
+ return 0;
+
+ /* iterate over the local enum's variants and make sure each has
+ * a symbolic name correspondent in the target
+ */
+ for (i = 0; i < local_vlen; i++) {
+ bool matched = false;
+ __u32 local_n_off, targ_n_off;
+
+ local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off :
+ btf_enum64(local_t)[i].name_off;
+
+ for (j = 0; j < targ_vlen; j++) {
+ targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off :
+ btf_enum64(targ_t)[j].name_off;
+
+ if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) {
+ matched = true;
+ break;
+ }
+ }
+
+ if (!matched)
+ return 0;
+ }
+ return 1;
+}
+
+static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t,
+ const struct btf *targ_btf, const struct btf_type *targ_t,
+ bool behind_ptr, int level)
+{
+ const struct btf_member *local_m = btf_members(local_t);
+ __u16 local_vlen = btf_vlen(local_t);
+ __u16 targ_vlen = btf_vlen(targ_t);
+ int i, j, err;
+
+ if (local_vlen > targ_vlen)
+ return 0;
+
+ /* check that all local members have a match in the target */
+ for (i = 0; i < local_vlen; i++, local_m++) {
+ const struct btf_member *targ_m = btf_members(targ_t);
+ bool matched = false;
+
+ for (j = 0; j < targ_vlen; j++, targ_m++) {
+ if (!bpf_core_names_match(local_btf, local_m->name_off,
+ targ_btf, targ_m->name_off))
+ continue;
+
+ err = __bpf_core_types_match(local_btf, local_m->type, targ_btf,
+ targ_m->type, behind_ptr, level - 1);
+ if (err < 0)
+ return err;
+ if (err > 0) {
+ matched = true;
+ break;
+ }
+ }
+
+ if (!matched)
+ return 0;
+ }
+ return 1;
+}
+
+/* Check that two types "match". This function assumes that root types were
+ * already checked for name match.
+ *
+ * The matching relation is defined as follows:
+ * - modifiers and typedefs are stripped (and, hence, effectively ignored)
+ * - generally speaking types need to be of same kind (struct vs. struct, union
+ * vs. union, etc.)
+ * - exceptions are struct/union behind a pointer which could also match a
+ * forward declaration of a struct or union, respectively, and enum vs.
+ * enum64 (see below)
+ * Then, depending on type:
+ * - integers:
+ * - match if size and signedness match
+ * - arrays & pointers:
+ * - target types are recursively matched
+ * - structs & unions:
+ * - local members need to exist in target with the same name
+ * - for each member we recursively check match unless it is already behind a
+ * pointer, in which case we only check matching names and compatible kind
+ * - enums:
+ * - local variants have to have a match in target by symbolic name (but not
+ * numeric value)
+ * - size has to match (but enum may match enum64 and vice versa)
+ * - function pointers:
+ * - number and position of arguments in local type has to match target
+ * - for each argument and the return value we recursively check match
+ */
+int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
+ __u32 targ_id, bool behind_ptr, int level)
+{
+ const struct btf_type *local_t, *targ_t;
+ int depth = 32; /* max recursion depth */
+ __u16 local_k, targ_k;
+
+ if (level <= 0)
+ return -EINVAL;
+
+ local_t = btf_type_by_id(local_btf, local_id);
+ targ_t = btf_type_by_id(targ_btf, targ_id);
+
+recur:
+ depth--;
+ if (depth < 0)
+ return -EINVAL;
+
+ local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_t || !targ_t)
+ return -EINVAL;
+
+ /* While the name check happens after typedefs are skipped, root-level
+ * typedefs would still be name-matched as that's the contract with
+ * callers.
+ */
+ if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off))
+ return 0;
+
+ local_k = btf_kind(local_t);
+ targ_k = btf_kind(targ_t);
+
+ switch (local_k) {
+ case BTF_KIND_UNKN:
+ return local_k == targ_k;
+ case BTF_KIND_FWD: {
+ bool local_f = BTF_INFO_KFLAG(local_t->info);
+
+ if (behind_ptr) {
+ if (local_k == targ_k)
+ return local_f == BTF_INFO_KFLAG(targ_t->info);
+
+ /* for forward declarations kflag dictates whether the
+ * target is a struct (0) or union (1)
+ */
+ return (targ_k == BTF_KIND_STRUCT && !local_f) ||
+ (targ_k == BTF_KIND_UNION && local_f);
+ } else {
+ if (local_k != targ_k)
+ return 0;
+
+ /* match if the forward declaration is for the same kind */
+ return local_f == BTF_INFO_KFLAG(targ_t->info);
+ }
+ }
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ if (!btf_is_any_enum(targ_t))
+ return 0;
+
+ return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ if (behind_ptr) {
+ bool targ_f = BTF_INFO_KFLAG(targ_t->info);
+
+ if (local_k == targ_k)
+ return 1;
+
+ if (targ_k != BTF_KIND_FWD)
+ return 0;
+
+ return (local_k == BTF_KIND_UNION) == targ_f;
+ } else {
+ if (local_k != targ_k)
+ return 0;
+
+ return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t,
+ behind_ptr, level);
+ }
+ case BTF_KIND_INT: {
+ __u8 local_sgn;
+ __u8 targ_sgn;
+
+ if (local_k != targ_k)
+ return 0;
+
+ local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED;
+ targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED;
+
+ return local_t->size == targ_t->size && local_sgn == targ_sgn;
+ }
+ case BTF_KIND_PTR:
+ if (local_k != targ_k)
+ return 0;
+
+ behind_ptr = true;
+
+ local_id = local_t->type;
+ targ_id = targ_t->type;
+ goto recur;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *local_array = btf_array(local_t);
+ const struct btf_array *targ_array = btf_array(targ_t);
+
+ if (local_k != targ_k)
+ return 0;
+
+ if (local_array->nelems != targ_array->nelems)
+ return 0;
+
+ local_id = local_array->type;
+ targ_id = targ_array->type;
+ goto recur;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *local_p = btf_params(local_t);
+ struct btf_param *targ_p = btf_params(targ_t);
+ __u16 local_vlen = btf_vlen(local_t);
+ __u16 targ_vlen = btf_vlen(targ_t);
+ int i, err;
+
+ if (local_k != targ_k)
+ return 0;
+
+ if (local_vlen != targ_vlen)
+ return 0;
+
+ for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
+ err = __bpf_core_types_match(local_btf, local_p->type, targ_btf,
+ targ_p->type, behind_ptr, level - 1);
+ if (err <= 0)
+ return err;
+ }
+
+ /* tail recurse for return type check */
+ local_id = local_t->type;
+ targ_id = targ_t->type;
+ goto recur;
+ }
+ default:
+ pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
+ btf_kind_str(local_t), local_id, targ_id);
+ return 0;
+ }
+}
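
To illustrate the matching rules described above, a hypothetical local/target pair that would satisfy bpf_core_types_match() (types invented for the example):

    /* local (BPF-side) definition */
    struct sk_buff___local {
            unsigned int len;          /* INT: size and signedness must match */
            struct net_device *dev;    /* behind pointer: a fwd decl also matches */
    };

    /* matches a target 'struct sk_buff' that has members 'len' and 'dev' of
     * compatible types; the target may contain any number of extra members,
     * since only local members need a counterpart in the target.
     */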
diff --git a/tools/lib/bpf/relo_core.h b/tools/lib/bpf/relo_core.h
index a28bf3711ce2..1c0566daf8e8 100644
--- a/tools/lib/bpf/relo_core.h
+++ b/tools/lib/bpf/relo_core.h
@@ -46,9 +46,9 @@ struct bpf_core_spec {
struct bpf_core_relo_res {
/* expected value in the instruction, unless validate == false */
- __u32 orig_val;
+ __u64 orig_val;
/* new value that needs to be patched up to */
- __u32 new_val;
+ __u64 new_val;
/* relocation unsuccessful, poison instruction, but don't fail load */
bool poison;
/* some relocations can't be validated against orig_val */
@@ -68,8 +68,14 @@ struct bpf_core_relo_res {
__u32 new_type_id;
};
+int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id, int level);
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
const struct btf *targ_btf, __u32 targ_id);
+int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
+ __u32 targ_id, bool behind_ptr, int level);
+int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
+ __u32 targ_id);
size_t bpf_core_essential_name_len(const char *name);
@@ -84,4 +90,10 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
int insn_idx, const struct bpf_core_relo *relo,
int relo_idx, const struct bpf_core_relo_res *res);
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec);
+
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec);
+
#endif
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
new file mode 100644
index 000000000000..4f2adc0bd6ca
--- /dev/null
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -0,0 +1,247 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef __USDT_BPF_H__
+#define __USDT_BPF_H__
+
+#include <linux/errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* Below types and maps are internal implementation details of libbpf's USDT
+ * support and are subject to change. Also, bpf_usdt_xxx() API helpers should
+ * be considered an unstable API as well and might be adjusted based on user
+ * feedback from using libbpf's USDT support in production.
+ */
+
+/* User can override BPF_USDT_MAX_SPEC_CNT to change default size of internal
+ * map that keeps track of USDT argument specifications. This might be
+ * necessary if there are a lot of USDT attachments.
+ */
+#ifndef BPF_USDT_MAX_SPEC_CNT
+#define BPF_USDT_MAX_SPEC_CNT 256
+#endif
+/* User can override BPF_USDT_MAX_IP_CNT to change default size of internal
+ * map that keeps track of IP (memory address) mapping to USDT argument
+ * specification.
+ * Note, if the kernel supports BPF cookies, this map is not used and can be
+ * resized all the way down to 1 to save a bit of memory.
+ */
+#ifndef BPF_USDT_MAX_IP_CNT
+#define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT)
+#endif
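
A sketch of overriding both defaults, which has to happen before the header is included:

    /* in the user's .bpf.c file */
    #define BPF_USDT_MAX_SPEC_CNT 1024
    #define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT)
    #include "usdt.bpf.h"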
+
+enum __bpf_usdt_arg_type {
+ BPF_USDT_ARG_CONST,
+ BPF_USDT_ARG_REG,
+ BPF_USDT_ARG_REG_DEREF,
+};
+
+struct __bpf_usdt_arg_spec {
+ /* u64 scalar interpreted depending on arg_type, see below */
+ __u64 val_off;
+ /* arg location case, see bpf_usdt_arg() for details */
+ enum __bpf_usdt_arg_type arg_type;
+ /* offset of referenced register within struct pt_regs */
+ short reg_off;
+ /* whether arg should be interpreted as signed value */
+ bool arg_signed;
+ /* number of bits that need to be cleared and, optionally,
+ * sign-extended to cast arguments that are 1, 2, or 4 bytes
+ * long into final 8-byte u64/s64 value returned to user
+ */
+ char arg_bitshift;
+};
+
+/* should match USDT_MAX_ARG_CNT in usdt.c exactly */
+#define BPF_USDT_MAX_ARG_CNT 12
+struct __bpf_usdt_spec {
+ struct __bpf_usdt_arg_spec args[BPF_USDT_MAX_ARG_CNT];
+ __u64 usdt_cookie;
+ short arg_cnt;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, BPF_USDT_MAX_SPEC_CNT);
+ __type(key, int);
+ __type(value, struct __bpf_usdt_spec);
+} __bpf_usdt_specs SEC(".maps") __weak;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, BPF_USDT_MAX_IP_CNT);
+ __type(key, long);
+ __type(value, __u32);
+} __bpf_usdt_ip_to_spec_id SEC(".maps") __weak;
+
+extern const _Bool LINUX_HAS_BPF_COOKIE __kconfig;
+
+static __always_inline
+int __bpf_usdt_spec_id(struct pt_regs *ctx)
+{
+ if (!LINUX_HAS_BPF_COOKIE) {
+ long ip = PT_REGS_IP(ctx);
+ int *spec_id_ptr;
+
+ spec_id_ptr = bpf_map_lookup_elem(&__bpf_usdt_ip_to_spec_id, &ip);
+ return spec_id_ptr ? *spec_id_ptr : -ESRCH;
+ }
+
+ return bpf_get_attach_cookie(ctx);
+}
+
+/* Return number of USDT arguments defined for currently traced USDT. */
+__weak __hidden
+int bpf_usdt_arg_cnt(struct pt_regs *ctx)
+{
+ struct __bpf_usdt_spec *spec;
+ int spec_id;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return -ESRCH;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return -ESRCH;
+
+ return spec->arg_cnt;
+}
+
+/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res.
+ * Returns 0 on success; a negative error code otherwise.
+ * On error, *res is guaranteed to be set to zero.
+ */
+__weak __hidden
+int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
+{
+ struct __bpf_usdt_spec *spec;
+ struct __bpf_usdt_arg_spec *arg_spec;
+ unsigned long val;
+ int err, spec_id;
+
+ *res = 0;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return -ESRCH;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return -ESRCH;
+
+ if (arg_num >= BPF_USDT_MAX_ARG_CNT || arg_num >= spec->arg_cnt)
+ return -ENOENT;
+
+ arg_spec = &spec->args[arg_num];
+ switch (arg_spec->arg_type) {
+ case BPF_USDT_ARG_CONST:
+ /* Arg is just a constant ("-4@$-9" in USDT arg spec).
+ * value is recorded in arg_spec->val_off directly.
+ */
+ val = arg_spec->val_off;
+ break;
+ case BPF_USDT_ARG_REG:
+ /* Arg is in a register (e.g, "8@%rax" in USDT arg spec),
+ * so we read the contents of that register directly from
+ * struct pt_regs. To keep things simple user-space parts
+ * record offsetof(struct pt_regs, <regname>) in arg_spec->reg_off.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ break;
+ case BPF_USDT_ARG_REG_DEREF:
+ /* Arg is in memory addressed by register, plus some offset
+ * (e.g., "-4@-1204(%rbp)" in USDT arg spec). Register is
+ * identified like with BPF_USDT_ARG_REG case, and the offset
+ * is in arg_spec->val_off. We first fetch register contents
+ * from pt_regs, then do another user-space probe read to
+ * fetch argument value itself.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ err = bpf_probe_read_user(&val, sizeof(val), (void *)val + arg_spec->val_off);
+ if (err)
+ return err;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ val >>= arg_spec->arg_bitshift;
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* cast arg from 1, 2, or 4 bytes to final 8 byte size clearing
+ * necessary upper arg_bitshift bits, with sign extension if argument
+ * is signed
+ */
+ val <<= arg_spec->arg_bitshift;
+ if (arg_spec->arg_signed)
+ val = ((long)val) >> arg_spec->arg_bitshift;
+ else
+ val = val >> arg_spec->arg_bitshift;
+ *res = val;
+ return 0;
+}
+
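
A sketch of fetching arguments manually with these helpers (the section name and probe details are left to the user; attachment happens from user space):

    SEC("usdt")
    int manual_usdt_handler(struct pt_regs *ctx)
    {
            long arg0;
            int n = bpf_usdt_arg_cnt(ctx);

            if (n > 0 && bpf_usdt_arg(ctx, 0, &arg0) == 0)
                    bpf_printk("first of %d args: %ld", n, arg0);
            return 0;
    }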
+/* Retrieve user-specified cookie value provided during attach as
+ * bpf_usdt_opts.usdt_cookie. This serves the same purpose as the BPF cookie
+ * returned by bpf_get_attach_cookie(). Libbpf's USDT support itself utilizes
+ * BPF cookies internally, so the user can't use the BPF cookie directly
+ * for USDT programs and has to use the bpf_usdt_cookie() API instead.
+ */
+__weak __hidden
+long bpf_usdt_cookie(struct pt_regs *ctx)
+{
+ struct __bpf_usdt_spec *spec;
+ int spec_id;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return 0;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return 0;
+
+ return spec->usdt_cookie;
+}
+
+/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */
+#define ___bpf_usdt_args0() ctx
+#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); (void *)_x; })
+#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); (void *)_x; })
+#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); (void *)_x; })
+#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); (void *)_x; })
+#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); (void *)_x; })
+#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); (void *)_x; })
+#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); (void *)_x; })
+#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); (void *)_x; })
+#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); (void *)_x; })
+#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); (void *)_x; })
+#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); (void *)_x; })
+#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); (void *)_x; })
+#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_USDT serves the same purpose for USDT handlers as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs and BPF_KPROBE for kprobes.
+ * Original struct pt_regs * context is preserved as 'ctx' argument.
+ */
+#define BPF_USDT(name, args...) \
+name(struct pt_regs *ctx); \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_usdt_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __attribute__((always_inline)) typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
+#endif /* __USDT_BPF_H__ */
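
A usage sketch for BPF_USDT (the binary path, provider, and probe names are hypothetical):

    SEC("usdt/./mybin:my_provider:my_probe")
    int BPF_USDT(handle_my_probe, int x, void *p)
    {
            /* 'ctx' is still available for helpers like bpf_usdt_cookie() */
            bpf_printk("x=%d p=%p cookie=%ld", x, p, bpf_usdt_cookie(ctx));
            return 0;
    }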
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
new file mode 100644
index 000000000000..d18e37982344
--- /dev/null
+++ b/tools/lib/bpf/usdt.c
@@ -0,0 +1,1519 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <unistd.h>
+#include <linux/ptrace.h>
+#include <linux/kernel.h>
+
+/* s8 will be marked as poison while it's a reg of riscv */
+#if defined(__riscv)
+#define rv_s8 s8
+#endif
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_common.h"
+#include "libbpf_internal.h"
+#include "hashmap.h"
+
+/* libbpf's USDT support consists of BPF-side state/code and user-space
+ * state/code working together in concert. BPF-side parts are defined in the
+ * usdt.bpf.h header library. User-space state is encapsulated by struct
+ * usdt_manager and all the supporting code centered around usdt_manager.
+ *
+ * usdt.bpf.h defines two BPF maps that usdt_manager expects: the USDT spec map
+ * and the IP-to-spec-ID map, which is an auxiliary map necessary for kernels
+ * that don't support BPF cookies (see below). These two maps are implicitly
+ * embedded into the user's final BPF object file when the user's code includes
+ * usdt.bpf.h. This means that libbpf doesn't do anything special to create
+ * these USDT support maps. They are created by the normal libbpf logic of
+ * instantiating BPF maps when opening and loading a BPF object.
+ *
+ * As such, libbpf is basically unaware of the need to do anything
+ * USDT-related until the very first call to bpf_program__attach_usdt(), which
+ * can be called by the user explicitly or happen automatically during skeleton
+ * attach (or, equivalently, through a generic bpf_program__attach() call). At
+ * this point, libbpf will instantiate and initialize struct usdt_manager and
+ * store it in the bpf_object. The USDT manager is a per-BPF-object construct,
+ * as each independent BPF object might or might not have USDT programs, and
+ * thus all the expected USDT-related state. There is no coordination between
+ * two bpf_objects in terms of USDT attachment: they are oblivious of each
+ * other's existence, and libbpf simply deals with each bpf_object's own USDT
+ * state.
+ *
+ * Quick crash course on USDTs.
+ *
+ * From a user-space application's point of view, a USDT is essentially just
+ * a slightly special function call that normally has zero overhead, unless it
+ * is being traced by some external entity (e.g., a BPF-based tool). Here's how
+ * a typical application can trigger a USDT probe:
+ *
+ * #include <sys/sdt.h> // provided by systemtap-sdt-devel package
+ * // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
+ *
+ * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
+ *
+ * A USDT is identified by its <provider-name>:<probe-name> pair of names. Each
+ * individual USDT has a fixed number of arguments (3 in the above example)
+ * and specifies the value of each argument as if it were a function call.
+ *
+ * A USDT call is actually not a function call; it is instead replaced by
+ * a single NOP instruction (thus effectively zero overhead). But in addition
+ * to that, those USDT macros generate special SHT_NOTE ELF records in the
+ * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
+ * `readelf -n <binary>`:
+ *
+ * stapsdt 0x00000089 NT_STAPSDT (SystemTap probe descriptors)
+ * Provider: test
+ * Name: usdt12
+ * Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
+ * Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
+ *
+ * In this case we have USDT test:usdt12 with 12 arguments.
+ *
+ * Location and base are offsets used to calculate the absolute IP address of
+ * that NOP instruction, which the kernel can replace with an interrupt
+ * instruction to trigger instrumentation code (a BPF program, for all we care
+ * about here).
+ *
+ * Semaphore above is an optional feature. It records the address of a 2-byte
+ * refcount variable (normally in the '.probes' ELF section) used to signal
+ * whether anything is attached to the USDT. This is useful for user
+ * applications if, for example, they need to prepare some arguments that are
+ * passed only to USDTs and preparation is expensive. By checking if the USDT
+ * is "activated", an application can avoid paying those costs unnecessarily.
+ * Recent enough kernels have built-in support for automatically managing this
+ * refcount, which libbpf expects and relies on. If a USDT is defined without
+ * an associated semaphore, this value will be zero. See selftests for
+ * semaphore examples.
+ *
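+ * For illustration only (the macro names below are hypothetical and depend
+ * on how the application generated its probe header, e.g. with dtrace -h),
+ * an application can guard expensive argument preparation with a generated
+ * *_ENABLED() macro that checks this semaphore:
+ *
+ *     if (MY_PROVIDER_MY_PROBE_ENABLED()) {
+ *         arg = prepare_expensive_arg();
+ *         MY_PROVIDER_MY_PROBE(arg);
+ *     }
+ *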
+ * Arguments is the most interesting part. This USDT specification string
+ * provides information about all the USDT arguments and their locations. The
+ * part before the @ sign defines the byte size of the argument (1, 2, 4, or 8)
+ * and whether the argument is signed or unsigned (negative size means signed).
+ * The part after the @ sign is an assembly-like definition of the argument's
+ * location (see [0] for more details). Technically, the assembler can emit
+ * some pretty advanced definitions, but libbpf currently supports the three
+ * most common cases:
+ * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
+ * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
+ * whose value is in register %rdx";
+ * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
+ * specifies signed 32-bit integer stored at offset -1204 bytes from
+ * memory address stored in %rbp.
+ *
+ * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ *
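+ * As a concrete example of case 3), on x86-64 the spec "-4@-1204(%rbp)"
+ * would be parsed (see parse_usdt_arg() below) into roughly:
+ *
+ *     arg_type = USDT_ARG_REG_DEREF;
+ *     val_off = -1204;
+ *     reg_off = offsetof(struct pt_regs, rbp);
+ *     arg_signed = true;
+ *     arg_bitshift = 32;   // 64 - 4 * 8
+ *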
+ * During attachment, libbpf parses all the relevant USDT specifications and
+ * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
+ * code through the spec map. This allows BPF applications to quickly fetch
+ * actual argument values at runtime using simple BPF-side code.
+ *
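+ * For a rough sketch of that BPF-side code (using helpers from usdt.bpf.h;
+ * the handler name and argument types here are illustrative):
+ *
+ *     SEC("usdt")
+ *     int BPF_USDT(handle_my_probe, int x, long *y)
+ *     {
+ *         int arg_cnt = bpf_usdt_arg_cnt(ctx);
+ *         // x and y were fetched via bpf_usdt_arg() under the hood
+ *         return 0;
+ *     }
+ *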
+ * With basics out of the way, let's go over less immediately obvious aspects
+ * of supporting USDTs.
+ *
+ * First, there is no special USDT BPF program type. It is actually just
+ * a uprobe BPF program (which for kernel, at least currently, is just a kprobe
+ * program, so BPF_PROG_TYPE_KPROBE program type). The only difference is
+ * that a uprobe is usually attached at the function entry, while a USDT will
+ * normally be somewhere inside the function. But it should always be
+ * pointing at a NOP instruction, which makes such uprobes the fastest uprobe
+ * kind.
+ *
+ * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
+ * macro invocations can end up being inlined many, many times, depending on
+ * specifics of each individual user application. So single conceptual USDT
+ * (identified by provider:name pair of identifiers) is, generally speaking,
+ * multiple uprobe locations (USDT call sites) in different places in user
+ * application. Further, again due to inlining, each USDT call site might end
+ * up having the same argument #N located in a different place. In one call
+ * site it could be a constant, in another it will end up in a register, and
+ * in yet another it could be some other register or even somewhere on the
+ * stack.
+ *
+ * As such, "attaching to USDT" means (in general case) attaching the same
+ * uprobe BPF program to multiple target locations in user application, each
+ * potentially having a completely different USDT spec associated with it.
+ * To wire all this up together libbpf allocates a unique integer spec ID for
+ * each unique USDT spec. Spec IDs are allocated as sequential small integers
+ * so that they can be used as keys in an array BPF map (for performance
+ * reasons). Spec ID allocation and accounting is a big part of what
+ * usdt_manager is about. This state has to be maintained per-BPF object and
+ * coordinated across different USDT attachments within the same BPF object.
+ *
+ * Spec ID is the key in the spec BPF map; the value is the actual USDT spec
+ * laid out as struct usdt_spec. Each invocation of the BPF program at
+ * runtime needs to
+ * know its associated spec ID. It gets it either through BPF cookie, which
+ * libbpf sets to spec ID during attach time, or, if kernel is too old to
+ * support BPF cookie, through IP-to-spec-ID map that libbpf maintains in such
+ * case. The latter means that some modes of operation can't be supported
+ * without BPF cookie. One such mode is attaching to a shared library
+ * "generically", without specifying a target process. In that case, it's
+ * impossible to calculate absolute IP addresses for the IP-to-spec-ID map,
+ * and thus such mode is not supported without BPF cookie support.
+ *
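+ * Conceptually (simplified from the actual definitions in usdt.bpf.h, which
+ * are authoritative), the two maps look roughly like this:
+ *
+ *     struct {
+ *         __uint(type, BPF_MAP_TYPE_ARRAY);
+ *         __type(key, int);                      // spec ID
+ *         __type(value, struct __bpf_usdt_spec);
+ *     } __bpf_usdt_specs SEC(".maps");
+ *
+ *     struct {
+ *         __uint(type, BPF_MAP_TYPE_HASH);
+ *         __type(key, long);                     // IP of attached uprobe
+ *         __type(value, int);                    // spec ID
+ *     } __bpf_usdt_ip_to_spec_id SEC(".maps");
+ *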
+ * Note that libbpf is using BPF cookie functionality for its own internal
+ * needs, so users themselves can't rely on the BPF cookie feature. To that
+ * end, libbpf provides conceptually equivalent USDT cookie support. It's
+ * still a u64 user-provided value that can be associated with a USDT
+ * attachment. Note that this will be the same value for all USDT call sites
+ * within the same single *logical* USDT attachment. This makes sense because
+ * to the user, attaching to a USDT means a single BPF program triggered for
+ * a singular USDT probe. The fact that this is done at multiple actual
+ * locations is a mostly hidden implementation detail. This USDT cookie value
+ * can be fetched with the bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
+ *
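+ * On the user-space side, a minimal attachment sketch (binary path, program,
+ * and probe names are placeholders) could look like:
+ *
+ *     LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0x100);
+ *     link = bpf_program__attach_usdt(skel->progs.handle_my_probe,
+ *                                     -1,        // any process
+ *                                     "/path/to/binary", "my_usdt_provider",
+ *                                     "my_usdt_probe_name", &opts);
+ *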
+ * Lastly, while a single USDT can have tons of USDT call sites, it doesn't
+ * necessarily have that many different USDT specs. It very well might be
+ * that 1000 USDT call sites only need 5 different USDT specs, because all the
+ * arguments are typically contained in a small set of registers or stack
+ * locations. As such, it's wasteful to allocate as many USDT spec IDs as
+ * there are USDT call sites. So libbpf tries to be frugal and performs
+ * on-the-fly deduplication during a single USDT attachment to only allocate
+ * the minimal required amount of unique USDT specs (and thus spec IDs). This
+ * is trivially achieved by using USDT spec string (Arguments string from USDT
+ * note) as a lookup key in a hashmap. USDT spec string uniquely defines
+ * everything about how to fetch USDT arguments, so two USDT call sites
+ * sharing USDT spec string can safely share the same USDT spec and spec ID.
+ * Note, this spec string deduplication happens only within a single USDT
+ * attachment, so each deduplicated USDT spec shares the same USDT cookie
+ * value. This is not generally true for other USDT attachments within the
+ * same BPF object, as even if the USDT spec string is the same, the USDT
+ * cookie value can be different. It was deemed excessive to try to
+ * deduplicate across independent USDT attachments by taking into account
+ * USDT spec string *and* USDT cookie value, which would have complicated
+ * spec ID accounting significantly for little gain.
+ */
+
+#define USDT_BASE_SEC ".stapsdt.base"
+#define USDT_SEMA_SEC ".probes"
+#define USDT_NOTE_SEC ".note.stapsdt"
+#define USDT_NOTE_TYPE 3
+#define USDT_NOTE_NAME "stapsdt"
+
+/* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
+enum usdt_arg_type {
+ USDT_ARG_CONST,
+ USDT_ARG_REG,
+ USDT_ARG_REG_DEREF,
+};
+
+/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
+struct usdt_arg_spec {
+ __u64 val_off;
+ enum usdt_arg_type arg_type;
+ short reg_off;
+ bool arg_signed;
+ char arg_bitshift;
+};
+
+/* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
+#define USDT_MAX_ARG_CNT 12
+
+/* should match struct __bpf_usdt_spec from usdt.bpf.h */
+struct usdt_spec {
+ struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
+ __u64 usdt_cookie;
+ short arg_cnt;
+};
+
+struct usdt_note {
+ const char *provider;
+ const char *name;
+ /* USDT args specification string, e.g.:
+ * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
+ */
+ const char *args;
+ long loc_addr;
+ long base_addr;
+ long sema_addr;
+};
+
+struct usdt_target {
+ long abs_ip;
+ long rel_ip;
+ long sema_off;
+ struct usdt_spec spec;
+ const char *spec_str;
+};
+
+struct usdt_manager {
+ struct bpf_map *specs_map;
+ struct bpf_map *ip_to_spec_id_map;
+
+ int *free_spec_ids;
+ size_t free_spec_cnt;
+ size_t next_free_spec_id;
+
+ bool has_bpf_cookie;
+ bool has_sema_refcnt;
+};
+
+struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
+{
+ static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
+ struct usdt_manager *man;
+ struct bpf_map *specs_map, *ip_to_spec_id_map;
+
+ specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
+ ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
+ if (!specs_map || !ip_to_spec_id_map) {
+ pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
+ return ERR_PTR(-ESRCH);
+ }
+
+ man = calloc(1, sizeof(*man));
+ if (!man)
+ return ERR_PTR(-ENOMEM);
+
+ man->specs_map = specs_map;
+ man->ip_to_spec_id_map = ip_to_spec_id_map;
+
+ /* Detect if BPF cookie is supported for kprobes.
+ * We don't need IP-to-ID mapping if we can use BPF cookies.
+ * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
+ */
+ man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
+
+ /* Detect kernel support for automatic refcounting of USDT semaphore.
+ * If this is not supported, USDTs with semaphores will not be supported.
+ * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
+ */
+ man->has_sema_refcnt = access(ref_ctr_sysfs_path, F_OK) == 0;
+
+ return man;
+}
+
+void usdt_manager_free(struct usdt_manager *man)
+{
+ if (IS_ERR_OR_NULL(man))
+ return;
+
+ free(man->free_spec_ids);
+ free(man);
+}
+
+static int sanity_check_usdt_elf(Elf *elf, const char *path)
+{
+ GElf_Ehdr ehdr;
+ int endianness;
+
+ if (elf_kind(elf) != ELF_K_ELF) {
+ pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
+ return -EBADF;
+ }
+
+ switch (gelf_getclass(elf)) {
+ case ELFCLASS64:
+ if (sizeof(void *) != 8) {
+ pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
+ return -EBADF;
+ }
+ break;
+ case ELFCLASS32:
+ if (sizeof(void *) != 4) {
+ pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
+ return -EBADF;
+ }
+ break;
+ default:
+ pr_warn("usdt: unsupported ELF class for '%s'\n", path);
+ return -EBADF;
+ }
+
+ if (!gelf_getehdr(elf, &ehdr))
+ return -EINVAL;
+
+ if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
+ pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
+ path, ehdr.e_type);
+ return -EBADF;
+ }
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ endianness = ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ endianness = ELFDATA2MSB;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+ if (endianness != ehdr.e_ident[EI_DATA]) {
+ pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
+ return -EBADF;
+ }
+
+ return 0;
+}
+
+static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
+{
+ Elf_Scn *sec = NULL;
+ size_t shstrndx;
+
+ if (elf_getshdrstrndx(elf, &shstrndx))
+ return -EINVAL;
+
+ /* check if ELF is corrupted and avoid calling elf_strptr if yes */
+ if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
+ return -EINVAL;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *name;
+
+ if (!gelf_getshdr(sec, shdr))
+ return -EINVAL;
+
+ name = elf_strptr(elf, shstrndx, shdr->sh_name);
+ if (name && strcmp(sec_name, name) == 0) {
+ *scn = sec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+struct elf_seg {
+ long start;
+ long end;
+ long offset;
+ bool is_exec;
+};
+
+static int cmp_elf_segs(const void *_a, const void *_b)
+{
+ const struct elf_seg *a = _a;
+ const struct elf_seg *b = _b;
+
+ return a->start < b->start ? -1 : 1;
+}
+
+static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
+{
+ GElf_Phdr phdr;
+ size_t n;
+ int i, err;
+ struct elf_seg *seg;
+ void *tmp;
+
+ *seg_cnt = 0;
+
+ if (elf_getphdrnum(elf, &n)) {
+ err = -errno;
+ return err;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (!gelf_getphdr(elf, i, &phdr)) {
+ err = -errno;
+ return err;
+ }
+
+ pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
+ i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
+ (long)phdr.p_type, (long)phdr.p_flags);
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
+ if (!tmp)
+ return -ENOMEM;
+
+ *segs = tmp;
+ seg = *segs + *seg_cnt;
+ (*seg_cnt)++;
+
+ seg->start = phdr.p_vaddr;
+ seg->end = phdr.p_vaddr + phdr.p_memsz;
+ seg->offset = phdr.p_offset;
+ seg->is_exec = phdr.p_flags & PF_X;
+ }
+
+ if (*seg_cnt == 0) {
+ pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
+ return -ESRCH;
+ }
+
+ qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
+ return 0;
+}
+
+static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
+{
+ char path[PATH_MAX], line[PATH_MAX], mode[16];
+ size_t seg_start, seg_end, seg_off;
+ struct elf_seg *seg;
+ int tmp_pid, i, err;
+ FILE *f;
+
+ *seg_cnt = 0;
+
+ /* Handle containerized binaries only accessible from
+ * /proc/<pid>/root/<path>. They will be reported as just /<path> in
+ * /proc/<pid>/maps.
+ */
+ if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
+ goto proceed;
+
+ if (!realpath(lib_path, path)) {
+ pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
+ lib_path, -errno);
+ libbpf_strlcpy(path, lib_path, sizeof(path));
+ }
+
+proceed:
+ sprintf(line, "/proc/%d/maps", pid);
+ f = fopen(line, "r");
+ if (!f) {
+ err = -errno;
+ pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
+ line, lib_path, err);
+ return err;
+ }
+
+ /* We need to handle lines with no path at the end:
+ *
+ * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613 /usr/lib64/libc-2.17.so
+ * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
+ * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598 /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
+ */
+ while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
+ &seg_start, &seg_end, mode, &seg_off, line) == 5) {
+ void *tmp;
+
+ /* to handle the no-path case (see above) we need to capture the
+ * line without skipping any whitespace, so we strip leading
+ * whitespace manually here
+ */
+ i = 0;
+ while (isblank(line[i]))
+ i++;
+ if (strcmp(line + i, path) != 0)
+ continue;
+
+ pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
+ path, seg_start, seg_end, mode, seg_off);
+
+ /* ignore non-executable mappings for shared libs */
+ if (mode[2] != 'x')
+ continue;
+
+ tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ *segs = tmp;
+ seg = *segs + *seg_cnt;
+ *seg_cnt += 1;
+
+ seg->start = seg_start;
+ seg->end = seg_end;
+ seg->offset = seg_off;
+ seg->is_exec = true;
+ }
+
+ if (*seg_cnt == 0) {
+ pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
+ lib_path, path, pid);
+ err = -ESRCH;
+ goto err_out;
+ }
+
+ qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
+ err = 0;
+err_out:
+ fclose(f);
+ return err;
+}
+
+static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
+{
+ struct elf_seg *seg;
+ int i;
+
+ /* for ELF binaries (both executables and shared libraries), we are
+ * given a virtual address (absolute for executables, relative for
+ * libraries) which should fall within a segment's [seg_start, seg_end)
+ * address range
+ */
+ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
+ if (seg->start <= virtaddr && virtaddr < seg->end)
+ return seg;
+ }
+ return NULL;
+}
+
+static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
+{
+ struct elf_seg *seg;
+ int i;
+
+ /* for VMA segments from /proc/<pid>/maps file, provided "address" is
+ * actually a file offset, so it should fall within the logical
+ * offset-based range of [offset_start, offset_end)
+ */
+ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
+ if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
+ return seg;
+ }
+ return NULL;
+}
+
+static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
+ const char *data, size_t name_off, size_t desc_off,
+ struct usdt_note *usdt_note);
+
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
+
+static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
+ const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
+ struct usdt_target **out_targets, size_t *out_target_cnt)
+{
+ size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
+ struct elf_seg *segs = NULL, *vma_segs = NULL;
+ struct usdt_target *targets = NULL, *target;
+ long base_addr = 0;
+ Elf_Scn *notes_scn, *base_scn;
+ GElf_Shdr base_shdr, notes_shdr;
+ GElf_Ehdr ehdr;
+ GElf_Nhdr nhdr;
+ Elf_Data *data;
+ int err;
+
+ *out_targets = NULL;
+ *out_target_cnt = 0;
+
+ err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
+ if (err) {
+ pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
+ return err;
+ }
+
+ if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
+ pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
+ return -EINVAL;
+ }
+
+ err = parse_elf_segs(elf, path, &segs, &seg_cnt);
+ if (err) {
+ pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
+ goto err_out;
+ }
+
+ /* .stapsdt.base ELF section is optional, but is used for prelink
+ * offset compensation (see a big comment further below)
+ */
+ if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
+ base_addr = base_shdr.sh_addr;
+
+ data = elf_getdata(notes_scn, 0);
+ off = 0;
+ while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
+ long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
+ struct usdt_note note;
+ struct elf_seg *seg = NULL;
+ void *tmp;
+
+ err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note);
+ if (err)
+ goto err_out;
+
+ if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
+ continue;
+
+ /* We need to compensate "prelink effect". See [0] for details,
+ * relevant parts quoted here:
+ *
+ * Each SDT probe also expands into a non-allocated ELF note. You can
+ * find this by looking at SHT_NOTE sections and decoding the format;
+ * see below for details. Because the note is non-allocated, it means
+ * there is no runtime cost, and also preserved in both stripped files
+ * and .debug files.
+ *
+ * However, this means that prelink won't adjust the note's contents
+ * for address offsets. Instead, this is done via the .stapsdt.base
+ * section. This is a special section that is added to the text. We
+ * will only ever have one of these sections in a final link and it
+ * will only ever be one byte long. Nothing about this section itself
+ * matters, we just use it as a marker to detect prelink address
+ * adjustments.
+ *
+ * Each probe note records the link-time address of the .stapsdt.base
+ * section alongside the probe PC address. The decoder compares the
+ * base address stored in the note with the .stapsdt.base section's
+ * sh_addr. Initially these are the same, but the section header will
+ * be adjusted by prelink. So the decoder applies the difference to
+ * the probe PC address to get the correct prelinked PC address; the
+ * same adjustment is applied to the semaphore address, if any.
+ *
+ * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ */
+ usdt_abs_ip = note.loc_addr;
+ if (base_addr)
+ usdt_abs_ip += base_addr - note.base_addr;
+
+ /* When attaching uprobes (which is what USDTs basically are)
+ * the kernel expects a file offset to be specified, not a
+ * relative virtual address, so we need to translate the virtual
+ * address to a file offset, for both ET_EXEC and ET_DYN binaries.
+ */
+ seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
+ usdt_provider, usdt_name, path, usdt_abs_ip);
+ goto err_out;
+ }
+ if (!seg->is_exec) {
+ err = -ESRCH;
+ pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
+ path, seg->start, seg->end, usdt_provider, usdt_name,
+ usdt_abs_ip);
+ goto err_out;
+ }
+ /* translate from virtual address to file offset */
+ usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
+
+ if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
+ /* If we don't have BPF cookie support but need to
+ * attach to a shared library, we'll need to know and
+ * record absolute addresses of attach points due to
+ * the need to lookup USDT spec by absolute IP of
+ * triggered uprobe. Doing this resolution is only
+ * possible when we have a specific PID of the process
+ * that's using specified shared library. BPF cookie
+ * removes the absolute address limitation as we don't
+ * need to do this lookup (we just use BPF cookie as
+ * an index of USDT spec), so for newer kernels with
+ * BPF cookie support libbpf supports USDT attachment
+ * to shared libraries with no PID filter.
+ */
+ if (pid < 0) {
+ pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
+ err = -ENOTSUP;
+ goto err_out;
+ }
+
+ /* vma_segs are lazily initialized only if necessary */
+ if (vma_seg_cnt == 0) {
+ err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
+ if (err) {
+ pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
+ pid, path, err);
+ goto err_out;
+ }
+ }
+
+ seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
+ usdt_provider, usdt_name, path, usdt_rel_ip);
+ goto err_out;
+ }
+
+ usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
+ }
+
+ pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
+ usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
+ note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
+ seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
+
+ /* Adjust semaphore address to be a file offset */
+ if (note.sema_addr) {
+ if (!man->has_sema_refcnt) {
+ pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
+ usdt_provider, usdt_name, path);
+ err = -ENOTSUP;
+ goto err_out;
+ }
+
+ seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
+ usdt_provider, usdt_name, path, note.sema_addr);
+ goto err_out;
+ }
+ if (seg->is_exec) {
+ err = -ESRCH;
+ pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
+ path, seg->start, seg->end, usdt_provider, usdt_name,
+ note.sema_addr);
+ goto err_out;
+ }
+
+ usdt_sema_off = note.sema_addr - seg->start + seg->offset;
+
+ pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
+ usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
+ path, note.sema_addr, note.base_addr, usdt_sema_off,
+ seg->start, seg->end, seg->offset);
+ }
+
+ /* Record adjusted addresses and offsets and parse USDT spec */
+ tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ targets = tmp;
+
+ target = &targets[target_cnt];
+ memset(target, 0, sizeof(*target));
+
+ target->abs_ip = usdt_abs_ip;
+ target->rel_ip = usdt_rel_ip;
+ target->sema_off = usdt_sema_off;
+
+ /* note.args references strings from the Elf handle itself, so they
+ * can be referenced safely until the elf_end() call
+ */
+ target->spec_str = note.args;
+
+ err = parse_usdt_spec(&target->spec, &note, usdt_cookie);
+ if (err)
+ goto err_out;
+
+ target_cnt++;
+ }
+
+ *out_targets = targets;
+ *out_target_cnt = target_cnt;
+ err = target_cnt;
+
+err_out:
+ free(segs);
+ free(vma_segs);
+ if (err < 0)
+ free(targets);
+ return err;
+}
+
+struct bpf_link_usdt {
+ struct bpf_link link;
+
+ struct usdt_manager *usdt_man;
+
+ size_t spec_cnt;
+ int *spec_ids;
+
+ size_t uprobe_cnt;
+ struct {
+ long abs_ip;
+ struct bpf_link *link;
+ } *uprobes;
+};
+
+static int bpf_link_usdt_detach(struct bpf_link *link)
+{
+ struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
+ struct usdt_manager *man = usdt_link->usdt_man;
+ int i;
+
+ for (i = 0; i < usdt_link->uprobe_cnt; i++) {
+ /* detach underlying uprobe link */
+ bpf_link__destroy(usdt_link->uprobes[i].link);
+ /* there is no need to update specs map because it will be
+ * unconditionally overwritten on subsequent USDT attaches,
+ * but if BPF cookies are not used we need to remove entry
+ * from ip_to_spec_id map, otherwise we'll run into false
+ * conflicting IP errors
+ */
+ if (!man->has_bpf_cookie) {
+ /* not much we can do about errors here */
+ (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
+ &usdt_link->uprobes[i].abs_ip);
+ }
+ }
+
+ /* try to return the list of previously used spec IDs to usdt_manager
+ * for reuse by subsequent USDT attaches
+ */
+ if (!man->free_spec_ids) {
+ /* if there were no free spec IDs yet, just transfer our IDs */
+ man->free_spec_ids = usdt_link->spec_ids;
+ man->free_spec_cnt = usdt_link->spec_cnt;
+ usdt_link->spec_ids = NULL;
+ } else {
+ /* otherwise concat IDs */
+ size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
+ int *new_free_ids;
+
+ new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
+ sizeof(*new_free_ids));
+ /* If we couldn't resize free_spec_ids, we'll just leak
+ * a bunch of free IDs; this is very unlikely to happen and if the
+ * system is so exhausted on memory, it's probably the least of the
+ * user's concerns.
+ * So just do our best here to return those IDs to usdt_manager.
+ */
+ if (new_free_ids) {
+ memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
+ usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
+ man->free_spec_ids = new_free_ids;
+ man->free_spec_cnt = new_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void bpf_link_usdt_dealloc(struct bpf_link *link)
+{
+ struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
+
+ free(usdt_link->spec_ids);
+ free(usdt_link->uprobes);
+ free(usdt_link);
+}
+
+static size_t specs_hash_fn(const void *key, void *ctx)
+{
+ const char *s = key;
+
+ return str_hash(s);
+}
+
+static bool specs_equal_fn(const void *key1, const void *key2, void *ctx)
+{
+ const char *s1 = key1;
+ const char *s2 = key2;
+
+ return strcmp(s1, s2) == 0;
+}
+
+static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
+ struct bpf_link_usdt *link, struct usdt_target *target,
+ int *spec_id, bool *is_new)
+{
+ void *tmp;
+ int err;
+
+ /* check if we already allocated spec ID for this spec string */
+ if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
+ *spec_id = (long)tmp;
+ *is_new = false;
+ return 0;
+ }
+
+ /* otherwise it's a new ID that needs to be set up in the specs map
+ * and returned to usdt_manager when the USDT link is detached
+ */
+ tmp = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
+ if (!tmp)
+ return -ENOMEM;
+ link->spec_ids = tmp;
+
+ /* get next free spec ID, giving preference to free list, if not empty */
+ if (man->free_spec_cnt) {
+ *spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
+
+ /* cache spec ID for current spec string for future lookups */
+ err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ if (err)
+ return err;
+
+ man->free_spec_cnt--;
+ } else {
+ /* don't allocate spec ID bigger than what fits in specs map */
+ if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
+ return -E2BIG;
+
+ *spec_id = man->next_free_spec_id;
+
+ /* cache spec ID for current spec string for future lookups */
+ err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id);
+ if (err)
+ return err;
+
+ man->next_free_spec_id++;
+ }
+
+ /* remember the new spec ID in the link for later return to the free list on detach */
+ link->spec_ids[link->spec_cnt] = *spec_id;
+ link->spec_cnt++;
+ *is_new = true;
+ return 0;
+}
+
+struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
+ pid_t pid, const char *path,
+ const char *usdt_provider, const char *usdt_name,
+ __u64 usdt_cookie)
+{
+ int i, fd, err, spec_map_fd, ip_map_fd;
+ LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ struct hashmap *specs_hash = NULL;
+ struct bpf_link_usdt *link = NULL;
+ struct usdt_target *targets = NULL;
+ size_t target_cnt;
+ Elf *elf;
+
+ spec_map_fd = bpf_map__fd(man->specs_map);
+ ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
+
+ /* TODO: perform path resolution similar to uprobe's */
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ err = -errno;
+ pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
+ return libbpf_err_ptr(err);
+ }
+
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ err = -EBADF;
+ pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
+ goto err_out;
+ }
+
+ err = sanity_check_usdt_elf(elf, path);
+ if (err)
+ goto err_out;
+
+ /* normalize PID filter */
+ if (pid < 0)
+ pid = -1;
+ else if (pid == 0)
+ pid = getpid();
+
+ /* discover USDTs in the given binary, optionally limiting
+ * activations to a given PID, if pid > 0
+ */
+ err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
+ usdt_cookie, &targets, &target_cnt);
+ if (err <= 0) {
+ err = (err == 0) ? -ENOENT : err;
+ goto err_out;
+ }
+
+ specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
+ if (IS_ERR(specs_hash)) {
+ err = PTR_ERR(specs_hash);
+ goto err_out;
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ link->usdt_man = man;
+ link->link.detach = &bpf_link_usdt_detach;
+ link->link.dealloc = &bpf_link_usdt_dealloc;
+
+ link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
+ if (!link->uprobes) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < target_cnt; i++) {
+ struct usdt_target *target = &targets[i];
+ struct bpf_link *uprobe_link;
+ bool is_new;
+ int spec_id;
+
+ /* Spec ID can be either reused or newly allocated. If it is
+ * newly allocated, we'll need to fill out the spec map, otherwise
+ * the entire spec is already valid and can simply be used by a
+ * new uprobe. We reuse a spec when the USDT arg spec is identical.
+ * We also never share specs between two different USDT
+ * attachments ("links"), so all the reused specs already
+ * share the USDT cookie value implicitly.
+ */
+ err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
+ if (err)
+ goto err_out;
+
+ if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
+ err = -errno;
+ pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
+ spec_id, usdt_provider, usdt_name, path, err);
+ goto err_out;
+ }
+ if (!man->has_bpf_cookie &&
+ bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
+ err = -errno;
+ if (err == -EEXIST) {
+ pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
+ spec_id, usdt_provider, usdt_name, path);
+ } else {
+ pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
+ target->abs_ip, spec_id, usdt_provider, usdt_name,
+ path, err);
+ }
+ goto err_out;
+ }
+
+ opts.ref_ctr_offset = target->sema_off;
+ opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
+ uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
+ target->rel_ip, &opts);
+ err = libbpf_get_error(uprobe_link);
+ if (err) {
+ pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
+ i, usdt_provider, usdt_name, path, err);
+ goto err_out;
+ }
+
+ link->uprobes[i].link = uprobe_link;
+ link->uprobes[i].abs_ip = target->abs_ip;
+ link->uprobe_cnt++;
+ }
+
+ free(targets);
+ hashmap__free(specs_hash);
+ elf_end(elf);
+ close(fd);
+
+ return &link->link;
+
+err_out:
+ if (link)
+ bpf_link__destroy(&link->link);
+ free(targets);
+ hashmap__free(specs_hash);
+ if (elf)
+ elf_end(elf);
+ close(fd);
+ return libbpf_err_ptr(err);
+}
+
+/* Parse out USDT ELF note from '.note.stapsdt' section.
+ * Logic inspired by perf's code.
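+ *
+ * The note's description ("desc" in ELF terms) is laid out as three
+ * native-word addresses (location, base, semaphore) followed by three
+ * zero-terminated strings: provider, name, and the args spec string
+ * (possibly empty).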
+ */
+static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
+ const char *data, size_t name_off, size_t desc_off,
+ struct usdt_note *note)
+{
+ const char *provider, *name, *args;
+ long addrs[3];
+ size_t len;
+
+ /* sanity check USDT note name and type first */
+ if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
+ return -EINVAL;
+ if (nhdr->n_type != USDT_NOTE_TYPE)
+ return -EINVAL;
+
+ /* sanity check USDT note contents ("description" in ELF terminology) */
+ len = nhdr->n_descsz;
+ data = data + desc_off;
+
+ /* +3 is the very minimum required to store three empty strings */
+ if (len < sizeof(addrs) + 3)
+ return -EINVAL;
+
+ /* get location, base, and semaphore addrs */
+ memcpy(&addrs, data, sizeof(addrs));
+
+ /* parse string fields: provider, name, args */
+ provider = data + sizeof(addrs);
+
+ name = (const char *)memchr(provider, '\0', data + len - provider);
+ if (!name) /* non-zero-terminated provider */
+ return -EINVAL;
+ name++;
+ if (name >= data + len || *name == '\0') /* missing or empty name */
+ return -EINVAL;
+
+ args = memchr(name, '\0', data + len - name);
+ if (!args) /* non-zero-terminated name */
+ return -EINVAL;
+ ++args;
+ if (args >= data + len) /* missing arguments spec */
+ return -EINVAL;
+
+ note->provider = provider;
+ note->name = name;
+ if (*args == '\0' || *args == ':')
+ note->args = "";
+ else
+ note->args = args;
+ note->loc_addr = addrs[0];
+ note->base_addr = addrs[1];
+ note->sema_addr = addrs[2];
+
+ return 0;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg);
+
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
+{
+ const char *s;
+ int len;
+
+ spec->usdt_cookie = usdt_cookie;
+ spec->arg_cnt = 0;
+
+ s = note->args;
+ while (s[0]) {
+ if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
+ pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
+ USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
+ return -E2BIG;
+ }
+
+ len = parse_usdt_arg(s, spec->arg_cnt, &spec->args[spec->arg_cnt]);
+ if (len < 0)
+ return len;
+
+ s += len;
+ spec->arg_cnt++;
+ }
+
+ return 0;
+}
+
+/* Architecture-specific logic for parsing USDT argument location specs */
+
+#if defined(__x86_64__) || defined(__i386__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *names[4];
+ size_t pt_regs_off;
+ } reg_map[] = {
+#ifdef __x86_64__
+#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
+#else
+#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
+#endif
+ { {"rip", "eip", "", ""}, reg_off(rip, eip) },
+ { {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
+ { {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
+ { {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
+ { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
+ { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
+ { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
+ { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
+ { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
+#undef reg_off
+#ifdef __x86_64__
+ { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
+ { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
+ { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
+ { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
+ { {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
+ { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
+ { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
+ { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
+#endif
+ };
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) {
+ if (strcmp(reg_name, reg_map[i].names[j]) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%%m[^)] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
+ /* Memory dereference case, e.g., -4@-20(%rbp) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %%%ms %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -4@%eax */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ $%ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@$71 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__s390x__)
+
+/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ unsigned int reg;
+ int arg_sz, len;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", &arg_sz, &off, &reg, &len) == 3) {
+ /* Memory dereference case, e.g., -2@-28(%r15) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ if (reg > 15) {
+ pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
+ return -EINVAL;
+ }
+ arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
+ } else if (sscanf(arg_str, " %d @ %%r%u %n", &arg_sz, &reg, &len) == 2) {
+ /* Register read case, e.g., -8@%r0 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ if (reg > 15) {
+ pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
+ return -EINVAL;
+ }
+ arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@71 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__aarch64__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ int reg_num;
+
+ if (sscanf(reg_name, "x%d", &reg_num) == 1) {
+ if (reg_num >= 0 && reg_num < 31)
+ return offsetof(struct user_pt_regs, regs[reg_num]);
+ } else if (strcmp(reg_name, "sp") == 0) {
+ return offsetof(struct user_pt_regs, sp);
+ }
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, &reg_name, &off, &len) == 3) {
+ /* Memory dereference case, e.g., -4@[sp, 96] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Memory dereference case, e.g., -4@[sp] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@x4 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__riscv)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *name;
+ size_t pt_regs_off;
+ } reg_map[] = {
+ { "ra", offsetof(struct user_regs_struct, ra) },
+ { "sp", offsetof(struct user_regs_struct, sp) },
+ { "gp", offsetof(struct user_regs_struct, gp) },
+ { "tp", offsetof(struct user_regs_struct, tp) },
+ { "a0", offsetof(struct user_regs_struct, a0) },
+ { "a1", offsetof(struct user_regs_struct, a1) },
+ { "a2", offsetof(struct user_regs_struct, a2) },
+ { "a3", offsetof(struct user_regs_struct, a3) },
+ { "a4", offsetof(struct user_regs_struct, a4) },
+ { "a5", offsetof(struct user_regs_struct, a5) },
+ { "a6", offsetof(struct user_regs_struct, a6) },
+ { "a7", offsetof(struct user_regs_struct, a7) },
+ { "s0", offsetof(struct user_regs_struct, s0) },
+ { "s1", offsetof(struct user_regs_struct, s1) },
+ { "s2", offsetof(struct user_regs_struct, s2) },
+ { "s3", offsetof(struct user_regs_struct, s3) },
+ { "s4", offsetof(struct user_regs_struct, s4) },
+ { "s5", offsetof(struct user_regs_struct, s5) },
+ { "s6", offsetof(struct user_regs_struct, s6) },
+ { "s7", offsetof(struct user_regs_struct, s7) },
+ { "s8", offsetof(struct user_regs_struct, rv_s8) },
+ { "s9", offsetof(struct user_regs_struct, s9) },
+ { "s10", offsetof(struct user_regs_struct, s10) },
+ { "s11", offsetof(struct user_regs_struct, s11) },
+ { "t0", offsetof(struct user_regs_struct, t0) },
+ { "t1", offsetof(struct user_regs_struct, t1) },
+ { "t2", offsetof(struct user_regs_struct, t2) },
+ { "t3", offsetof(struct user_regs_struct, t3) },
+ { "t4", offsetof(struct user_regs_struct, t4) },
+ { "t5", offsetof(struct user_regs_struct, t5) },
+ { "t6", offsetof(struct user_regs_struct, t6) },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ if (strcmp(reg_name, reg_map[i].name) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %m[a-z0-9] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
+ /* Memory dereference case, e.g., -8@-88(s0) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@a1 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#else
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
+ return -ENOTSUP;
+}
+
+#endif
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index a09315538a30..e6c98a6e3908 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -23,6 +23,7 @@
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
+#include "internal.h"
void perf_evlist__init(struct perf_evlist *evlist)
{
@@ -39,10 +40,11 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
* We already have cpus for evsel (via PMU sysfs) so
* keep it, if there's no target cpu list defined.
*/
- if (!evsel->own_cpus || evlist->has_user_cpus) {
- perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
- } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->user_requested_cpus)) {
+ if (!evsel->own_cpus ||
+ (!evsel->system_wide && evlist->has_user_cpus) ||
+ (!evsel->system_wide &&
+ !evsel->requires_cpu &&
+ perf_cpu_map__empty(evlist->user_requested_cpus))) {
perf_cpu_map__put(evsel->cpus);
evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
} else if (evsel->cpus != evsel->own_cpus) {
@@ -50,8 +52,11 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
}
- perf_thread_map__put(evsel->threads);
- evsel->threads = perf_thread_map__get(evlist->threads);
+ if (!evsel->system_wide) {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__get(evlist->threads);
+ }
+
evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}
@@ -59,6 +64,10 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
+ /* Recomputing all_cpus, so start with a blank slate. */
+ perf_cpu_map__put(evlist->all_cpus);
+ evlist->all_cpus = NULL;
+
perf_evlist__for_each_evsel(evlist, evsel)
__perf_evlist__propagate_maps(evlist, evsel);
}
@@ -294,7 +303,7 @@ add:
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
- int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
int nr_threads = perf_thread_map__nr(evlist->threads);
int nfds = 0;
struct perf_evsel *evsel;
@@ -424,9 +433,9 @@ static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int idx, struct perf_mmap_param *mp, int cpu_idx,
- int thread, int *_output, int *_output_overwrite)
+ int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
- struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
+ struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
struct perf_evsel *evsel;
int revent;
@@ -474,9 +483,14 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
*/
refcount_set(&map->refcnt, 2);
+ if (ops->idx)
+ ops->idx(evlist, evsel, mp, idx);
+
if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
return -1;
+ *nr_mmaps += 1;
+
if (!idx)
perf_evlist__set_mmap_first(evlist, map, overwrite);
} else {
@@ -506,53 +520,28 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
}
static int
-mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
- struct perf_mmap_param *mp)
-{
- int thread;
- int nr_threads = perf_thread_map__nr(evlist->threads);
-
- for (thread = 0; thread < nr_threads; thread++) {
- int output = -1;
- int output_overwrite = -1;
-
- if (ops->idx)
- ops->idx(evlist, mp, thread, false);
-
- if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
- &output, &output_overwrite))
- goto out_unmap;
- }
-
- return 0;
-
-out_unmap:
- perf_evlist__munmap(evlist);
- return -1;
-}
-
-static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
int nr_threads = perf_thread_map__nr(evlist->threads);
- int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int nr_mmaps = 0;
int cpu, thread;
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
int output_overwrite = -1;
- if (ops->idx)
- ops->idx(evlist, mp, cpu, true);
-
for (thread = 0; thread < nr_threads; thread++) {
if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
- thread, &output, &output_overwrite))
+ thread, &output, &output_overwrite, &nr_mmaps))
goto out_unmap;
}
}
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
return 0;
out_unmap:
@@ -564,9 +553,14 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
int nr_mmaps;
- nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
- if (perf_cpu_map__empty(evlist->user_requested_cpus))
- nr_mmaps = perf_thread_map__nr(evlist->threads);
+ /* One for each CPU */
+ nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
+ if (perf_cpu_map__empty(evlist->all_cpus)) {
+ /* Plus one for each thread */
+ nr_mmaps += perf_thread_map__nr(evlist->threads);
+ /* Minus the per-thread CPU (-1) */
+ nr_mmaps -= 1;
+ }
return nr_mmaps;
}
@@ -576,7 +570,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_mmap_param *mp)
{
struct perf_evsel *evsel;
- const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
if (!ops || !ops->get || !ops->mmap)
return -EINVAL;
@@ -595,9 +588,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- if (perf_cpu_map__empty(cpus))
- return mmap_per_thread(evlist, ops, mp);
-
return mmap_per_cpu(evlist, ops, mp);
}
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 210ea7c06ce8..952f3520d5c2 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -149,23 +149,30 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
int fd, group_fd, *evsel_fd;
evsel_fd = FD(evsel, idx, thread);
- if (evsel_fd == NULL)
- return -EINVAL;
+ if (evsel_fd == NULL) {
+ err = -EINVAL;
+ goto out;
+ }
err = get_group_fd(evsel, idx, thread, &group_fd);
if (err < 0)
- return err;
+ goto out;
fd = sys_perf_event_open(&evsel->attr,
threads->map[thread].pid,
cpu, group_fd, 0);
- if (fd < 0)
- return -errno;
+ if (fd < 0) {
+ err = -errno;
+ goto out;
+ }
*evsel_fd = fd;
}
}
+out:
+ if (err)
+ perf_evsel__close(evsel);
return err;
}
@@ -328,6 +335,17 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
return 0;
}
+static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
+ int cpu_map_idx, int thread)
+{
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd == NULL || *fd < 0)
+ return -1;
+
+ return ioctl(*fd, ioc, arg);
+}
+
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int ioc, void *arg,
int cpu_map_idx)
@@ -335,13 +353,7 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int err;
- int *fd = FD(evsel, cpu_map_idx, thread);
-
- if (fd == NULL || *fd < 0)
- return -1;
-
- err = ioctl(*fd, ioc, arg);
+ int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);
if (err)
return err;
@@ -355,6 +367,21 @@ int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
}
+int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
+{
+ struct perf_cpu cpu __maybe_unused;
+ int idx;
+ int err;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
+ err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int perf_evsel__enable(struct perf_evsel *evsel)
{
int i;
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
index e3e64f37db7b..6f89aec3e608 100644
--- a/tools/lib/perf/include/internal/evlist.h
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -38,7 +38,8 @@ struct perf_evlist {
};
typedef void
-(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
+(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_evsel*,
+ struct perf_mmap_param*, int);
typedef struct perf_mmap*
(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
typedef int
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index cfc9ebd7968e..2a912a1f1989 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -49,7 +49,18 @@ struct perf_evsel {
/* parse modifier helper */
int nr_members;
+ /*
+ * system_wide is for events that need to be on every CPU, irrespective
+ * of user requested CPUs or threads. Map propagation will set cpus to
+ * this event's own_cpus, whereby they will contribute to evlist
+ * all_cpus.
+ */
bool system_wide;
+ /*
+ * Some events, for example uncore events, require a CPU, i.e. it
+ * cannot be the 'any CPU' value of -1.
+ */
+ bool requires_cpu;
int idx;
};
diff --git a/tools/lib/perf/include/internal/lib.h b/tools/lib/perf/include/internal/lib.h
index 5175d491b2d4..85471a4b900f 100644
--- a/tools/lib/perf/include/internal/lib.h
+++ b/tools/lib/perf/include/internal/lib.h
@@ -9,4 +9,6 @@ extern unsigned int page_size;
ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs);
+
#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 4a2edbdb5e2b..24de795b09bb 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -31,4 +31,7 @@ LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_c
(idx) < perf_cpu_map__nr(cpus); \
(idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
+#define perf_cpu_map__for_each_idx(idx, cpus) \
+ for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)
+
#endif /* __LIBPERF_CPUMAP_H */
diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h
index 2a9516b42d15..699c0ed97d34 100644
--- a/tools/lib/perf/include/perf/evsel.h
+++ b/tools/lib/perf/include/perf/evsel.h
@@ -36,6 +36,7 @@ LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int
struct perf_counts_values *count);
LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel);
LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread);
LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel);
LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
diff --git a/tools/lib/perf/lib.c b/tools/lib/perf/lib.c
index 18658931fc71..696fb0ea67c6 100644
--- a/tools/lib/perf/lib.c
+++ b/tools/lib/perf/lib.c
@@ -38,6 +38,26 @@ ssize_t readn(int fd, void *buf, size_t n)
return ion(true, fd, buf, n);
}
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs)
+{
+ size_t left = n;
+
+ while (left) {
+ ssize_t ret = pread(fd, buf, left, offs);
+
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ return ret;
+
+ left -= ret;
+ buf += ret;
+ offs += ret;
+ }
+
+ return n;
+}
+
/*
* Write exactly 'n' bytes or return an error.
*/
diff --git a/tools/lib/thermal/.gitignore b/tools/lib/thermal/.gitignore
new file mode 100644
index 000000000000..5d2aeda80fea
--- /dev/null
+++ b/tools/lib/thermal/.gitignore
@@ -0,0 +1,2 @@
+libthermal.so*
+libthermal.pc
diff --git a/tools/lib/thermal/Build b/tools/lib/thermal/Build
new file mode 100644
index 000000000000..4a892d9e24f9
--- /dev/null
+++ b/tools/lib/thermal/Build
@@ -0,0 +1,5 @@
+libthermal-y += commands.o
+libthermal-y += events.o
+libthermal-y += thermal_nl.o
+libthermal-y += sampling.o
+libthermal-y += thermal.o
diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile
new file mode 100644
index 000000000000..2d0d255fd0e1
--- /dev/null
+++ b/tools/lib/thermal/Makefile
@@ -0,0 +1,165 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Most of this file is copied from tools/lib/perf/Makefile
+
+LIBTHERMAL_VERSION = 0
+LIBTHERMAL_PATCHLEVEL = 0
+LIBTHERMAL_EXTRAVERSION = 1
+
+MAKEFLAGS += --no-print-directory
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+# $(info Determined 'srctree' to be $(srctree))
+endif
+
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+include $(srctree)/tools/scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.arch
+
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?=
+libdir = $(prefix)/$(libdir_relative)
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
+
+ifeq ("$(origin V)", "command line")
+ VERBOSE = $(V)
+endif
+ifndef VERBOSE
+ VERBOSE = 0
+endif
+
+ifeq ($(VERBOSE),1)
+ Q =
+else
+ Q = @
+endif
+
+# Set compile option CFLAGS
+ifdef EXTRA_CFLAGS
+ CFLAGS := $(EXTRA_CFLAGS)
+else
+ CFLAGS := -g -Wall
+endif
+
+INCLUDES = \
+-I/usr/include/libnl3 \
+-I$(srctree)/tools/lib/thermal/include \
+-I$(srctree)/tools/lib/ \
+-I$(srctree)/tools/include \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/ \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/uapi \
+-I$(srctree)/tools/include/uapi
+
+# Append required CFLAGS
+override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += -Werror -Wall
+override CFLAGS += -fPIC
+override CFLAGS += $(INCLUDES)
+override CFLAGS += -fvisibility=hidden
+override CFLAGS += -Wl,-L.
+override CFLAGS += -Wl,-lthermal
+
+all:
+
+export srctree OUTPUT CC LD CFLAGS V
+export DESTDIR DESTDIR_SQ
+
+include $(srctree)/tools/build/Makefile.include
+
+VERSION_SCRIPT := libthermal.map
+
+PATCHLEVEL = $(LIBTHERMAL_PATCHLEVEL)
+EXTRAVERSION = $(LIBTHERMAL_EXTRAVERSION)
+VERSION = $(LIBTHERMAL_VERSION).$(LIBTHERMAL_PATCHLEVEL).$(LIBTHERMAL_EXTRAVERSION)
+
+LIBTHERMAL_SO := $(OUTPUT)libthermal.so.$(VERSION)
+LIBTHERMAL_A := $(OUTPUT)libthermal.a
+LIBTHERMAL_IN := $(OUTPUT)libthermal-in.o
+LIBTHERMAL_PC := $(OUTPUT)libthermal.pc
+LIBTHERMAL_ALL := $(LIBTHERMAL_A) $(OUTPUT)libthermal.so*
+
+THERMAL_UAPI := include/uapi/linux/thermal.h
+
+$(THERMAL_UAPI): FORCE
+ ln -sf $(srctree)/$@ $(srctree)/tools/$@
+
+$(LIBTHERMAL_IN): FORCE
+ $(Q)$(MAKE) $(build)=libthermal
+
+$(LIBTHERMAL_A): $(LIBTHERMAL_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBTHERMAL_IN)
+
+$(LIBTHERMAL_SO): $(LIBTHERMAL_IN)
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libthermal.so \
+ -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+ @ln -sf $(@F) $(OUTPUT)libthermal.so
+ @ln -sf $(@F) $(OUTPUT)libthermal.so.$(LIBTHERMAL_VERSION)
+
+
+libs: $(THERMAL_UAPI) $(LIBTHERMAL_A) $(LIBTHERMAL_SO) $(LIBTHERMAL_PC)
+
+all: fixdep
+ $(Q)$(MAKE) libs
+
+clean:
+ $(call QUIET_CLEAN, libthermal) $(RM) $(LIBTHERMAL_A) \
+ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBTHERMAL_VERSION) .*.d .*.cmd LIBTHERMAL-CFLAGS $(LIBTHERMAL_PC)
+
+$(LIBTHERMAL_PC):
+ $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
+ -e "s|@LIBDIR@|$(libdir_SQ)|" \
+ -e "s|@VERSION@|$(VERSION)|" \
+ < libthermal.pc.template > $@
+
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+endef
+
+install_lib: libs
+ $(call QUIET_INSTALL, $(LIBTHERMAL_ALL)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIBTHERMAL_ALL) $(DESTDIR)$(libdir_SQ)
+
+install_headers:
+ $(call QUIET_INSTALL, headers) \
+		$(call do_install,include/thermal.h,$(prefix)/include/thermal,644)
+
+install_pkgconfig: $(LIBTHERMAL_PC)
+ $(call QUIET_INSTALL, $(LIBTHERMAL_PC)) \
+ $(call do_install,$(LIBTHERMAL_PC),$(libdir_SQ)/pkgconfig,644)
+
+install_doc:
+ $(Q)$(MAKE) -C Documentation install-man install-html install-examples
+
+install: install_lib install_headers install_pkgconfig
+
+FORCE:
+
+.PHONY: all install clean FORCE
diff --git a/tools/lib/thermal/commands.c b/tools/lib/thermal/commands.c
new file mode 100644
index 000000000000..73d4d4e8d6ec
--- /dev/null
+++ b/tools/lib/thermal/commands.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+static struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = {
+ /* Thermal zone */
+ [THERMAL_GENL_ATTR_TZ] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_TZ_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TEMP] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_TZ_TRIP_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP_TEMP] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP_TYPE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP_HYST] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_MODE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_NAME] = { .type = NLA_STRING },
+
+ /* Governor(s) */
+ [THERMAL_GENL_ATTR_TZ_GOV] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_TZ_GOV_NAME] = { .type = NLA_STRING },
+
+ /* Cooling devices */
+ [THERMAL_GENL_ATTR_CDEV] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_CDEV_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CDEV_CUR_STATE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING },
+};
+
+static int parse_tz_get(struct genl_info *info, struct thermal_zone **tz)
+{
+ struct nlattr *attr;
+ struct thermal_zone *__tz = NULL;
+ size_t size = 0;
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[THERMAL_GENL_ATTR_TZ], rem) {
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_ID) {
+
+ size++;
+
+ __tz = realloc(__tz, sizeof(*__tz) * (size + 2));
+ if (!__tz)
+ return THERMAL_ERROR;
+
+ __tz[size - 1].id = nla_get_u32(attr);
+ }
+
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_NAME)
+ nla_strlcpy(__tz[size - 1].name, attr,
+ THERMAL_NAME_LENGTH);
+ }
+
+ if (__tz)
+ __tz[size].id = -1;
+
+ *tz = __tz;
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_cdev_get(struct genl_info *info, struct thermal_cdev **cdev)
+{
+ struct nlattr *attr;
+ struct thermal_cdev *__cdev = NULL;
+ size_t size = 0;
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[THERMAL_GENL_ATTR_CDEV], rem) {
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_ID) {
+
+ size++;
+
+ __cdev = realloc(__cdev, sizeof(*__cdev) * (size + 2));
+ if (!__cdev)
+ return THERMAL_ERROR;
+
+ __cdev[size - 1].id = nla_get_u32(attr);
+ }
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_NAME) {
+ nla_strlcpy(__cdev[size - 1].name, attr,
+ THERMAL_NAME_LENGTH);
+ }
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_CUR_STATE)
+ __cdev[size - 1].cur_state = nla_get_u32(attr);
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_MAX_STATE)
+ __cdev[size - 1].max_state = nla_get_u32(attr);
+ }
+
+ if (__cdev)
+ __cdev[size].id = -1;
+
+ *cdev = __cdev;
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_tz_get_trip(struct genl_info *info, struct thermal_zone *tz)
+{
+ struct nlattr *attr;
+ struct thermal_trip *__tt = NULL;
+ size_t size = 0;
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[THERMAL_GENL_ATTR_TZ_TRIP], rem) {
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_ID) {
+
+ size++;
+
+ __tt = realloc(__tt, sizeof(*__tt) * (size + 2));
+ if (!__tt)
+ return THERMAL_ERROR;
+
+ __tt[size - 1].id = nla_get_u32(attr);
+ }
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_TYPE)
+ __tt[size - 1].type = nla_get_u32(attr);
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_TEMP)
+ __tt[size - 1].temp = nla_get_u32(attr);
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_HYST)
+ __tt[size - 1].hyst = nla_get_u32(attr);
+ }
+
+ if (__tt)
+ __tt[size].id = -1;
+
+ tz->trip = __tt;
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_tz_get_temp(struct genl_info *info, struct thermal_zone *tz)
+{
+ int id = -1;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_ID])
+ id = nla_get_u32(info->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+
+ if (tz->id != id)
+ return THERMAL_ERROR;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_TEMP])
+ tz->temp = nla_get_u32(info->attrs[THERMAL_GENL_ATTR_TZ_TEMP]);
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_tz_get_gov(struct genl_info *info, struct thermal_zone *tz)
+{
+ int id = -1;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_ID])
+ id = nla_get_u32(info->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+
+ if (tz->id != id)
+ return THERMAL_ERROR;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_GOV_NAME]) {
+ nla_strlcpy(tz->governor,
+ info->attrs[THERMAL_GENL_ATTR_TZ_GOV_NAME],
+ THERMAL_NAME_LENGTH);
+ }
+
+ return THERMAL_SUCCESS;
+}
+
+static int handle_netlink(struct nl_cache_ops *unused,
+ struct genl_cmd *cmd,
+ struct genl_info *info, void *arg)
+{
+ int ret;
+
+ switch (cmd->c_id) {
+
+ case THERMAL_GENL_CMD_TZ_GET_ID:
+ ret = parse_tz_get(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_CDEV_GET:
+ ret = parse_cdev_get(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_TZ_GET_TEMP:
+ ret = parse_tz_get_temp(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_TZ_GET_TRIP:
+ ret = parse_tz_get_trip(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_TZ_GET_GOV:
+ ret = parse_tz_get_gov(info, arg);
+ break;
+
+ default:
+ return THERMAL_ERROR;
+ }
+
+ return ret;
+}
+
+static struct genl_cmd thermal_cmds[] = {
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_ID,
+ .c_name = (char *)"List thermal zones",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_GOV,
+ .c_name = (char *)"Get governor",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_TEMP,
+ .c_name = (char *)"Get thermal zone temperature",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_TRIP,
+ .c_name = (char *)"Get thermal zone trip points",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_CDEV_GET,
+ .c_name = (char *)"Get cooling devices",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+};
+
+static struct genl_ops thermal_cmd_ops = {
+ .o_name = (char *)"thermal",
+ .o_cmds = thermal_cmds,
+ .o_ncmds = ARRAY_SIZE(thermal_cmds),
+};
+
+static thermal_error_t thermal_genl_auto(struct thermal_handler *th, int id, int cmd,
+ int flags, void *arg)
+{
+ struct nl_msg *msg;
+ void *hdr;
+
+ msg = nlmsg_alloc();
+ if (!msg)
+ return THERMAL_ERROR;
+
+	hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, thermal_cmd_ops.o_id,
+			  0, flags, cmd, THERMAL_GENL_VERSION);
+	if (!hdr)
+		goto out_free_msg;
+
+	if (id >= 0 && nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id))
+		goto out_free_msg;
+
+	if (nl_send_msg(th->sk_cmd, th->cb_cmd, msg, genl_handle_msg, arg))
+		goto out_free_msg;
+
+	nlmsg_free(msg);
+
+	return THERMAL_SUCCESS;
+
+out_free_msg:
+	nlmsg_free(msg);
+	return THERMAL_ERROR;
+}
+
+thermal_error_t thermal_cmd_get_tz(struct thermal_handler *th, struct thermal_zone **tz)
+{
+ return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_TZ_GET_ID,
+ NLM_F_DUMP | NLM_F_ACK, tz);
+}
+
+thermal_error_t thermal_cmd_get_cdev(struct thermal_handler *th, struct thermal_cdev **tc)
+{
+ return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_CDEV_GET,
+ NLM_F_DUMP | NLM_F_ACK, tc);
+}
+
+thermal_error_t thermal_cmd_get_trip(struct thermal_handler *th, struct thermal_zone *tz)
+{
+ return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TRIP,
+ 0, tz);
+}
+
+thermal_error_t thermal_cmd_get_governor(struct thermal_handler *th, struct thermal_zone *tz)
+{
+ return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_GOV, 0, tz);
+}
+
+thermal_error_t thermal_cmd_get_temp(struct thermal_handler *th, struct thermal_zone *tz)
+{
+ return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TEMP, 0, tz);
+}
+
+thermal_error_t thermal_cmd_exit(struct thermal_handler *th)
+{
+ if (genl_unregister_family(&thermal_cmd_ops))
+ return THERMAL_ERROR;
+
+ nl_thermal_disconnect(th->sk_cmd, th->cb_cmd);
+
+ return THERMAL_SUCCESS;
+}
+
+thermal_error_t thermal_cmd_init(struct thermal_handler *th)
+{
+ int ret;
+ int family;
+
+ if (nl_thermal_connect(&th->sk_cmd, &th->cb_cmd))
+ return THERMAL_ERROR;
+
+ ret = genl_register_family(&thermal_cmd_ops);
+ if (ret)
+ return THERMAL_ERROR;
+
+ ret = genl_ops_resolve(th->sk_cmd, &thermal_cmd_ops);
+ if (ret)
+ return THERMAL_ERROR;
+
+ family = genl_ctrl_resolve(th->sk_cmd, "nlctrl");
+ if (family != GENL_ID_CTRL)
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
diff --git a/tools/lib/thermal/events.c b/tools/lib/thermal/events.c
new file mode 100644
index 000000000000..a7a55d1a0c4c
--- /dev/null
+++ b/tools/lib/thermal/events.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <linux/netlink.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+/*
+ * Optimization: this array records which events we want to pay
+ * attention to. It is filled at init time from the ops structure:
+ * each non-NULL callback enables the corresponding event, and the
+ * generic handler discards any event that has no ops associated
+ * with it.
+ */
+static int enabled_ops[__THERMAL_GENL_EVENT_MAX];
+
+static int handle_thermal_event(struct nl_msg *n, void *arg)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(n);
+ struct genlmsghdr *genlhdr = genlmsg_hdr(nlh);
+ struct nlattr *attrs[THERMAL_GENL_ATTR_MAX + 1];
+ struct thermal_handler_param *thp = arg;
+ struct thermal_events_ops *ops = &thp->th->ops->events;
+
+ genlmsg_parse(nlh, 0, attrs, THERMAL_GENL_ATTR_MAX, NULL);
+
+ arg = thp->arg;
+
+	/*
+	 * This is an event we don't care about; bail out.
+	 */
+ if (!enabled_ops[genlhdr->cmd])
+ return THERMAL_SUCCESS;
+
+ switch (genlhdr->cmd) {
+
+ case THERMAL_GENL_EVENT_TZ_CREATE:
+ return ops->tz_create(nla_get_string(attrs[THERMAL_GENL_ATTR_TZ_NAME]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_DELETE:
+ return ops->tz_delete(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_ENABLE:
+ return ops->tz_enable(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_DISABLE:
+ return ops->tz_disable(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_CHANGE:
+ return ops->trip_change(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TYPE]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TEMP]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_HYST]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_ADD:
+ return ops->trip_add(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TYPE]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TEMP]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_HYST]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_DELETE:
+ return ops->trip_delete(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_UP:
+ return ops->trip_high(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TEMP]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_DOWN:
+ return ops->trip_low(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TEMP]), arg);
+
+ case THERMAL_GENL_EVENT_CDEV_ADD:
+ return ops->cdev_add(nla_get_string(attrs[THERMAL_GENL_ATTR_CDEV_NAME]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_MAX_STATE]), arg);
+
+ case THERMAL_GENL_EVENT_CDEV_DELETE:
+ return ops->cdev_delete(nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_ID]), arg);
+
+ case THERMAL_GENL_EVENT_CDEV_STATE_UPDATE:
+ return ops->cdev_update(nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_CUR_STATE]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_GOV_CHANGE:
+ return ops->gov_change(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_string(attrs[THERMAL_GENL_ATTR_GOV_NAME]), arg);
+ default:
+ return -1;
+ }
+}
+
+static void thermal_events_ops_init(struct thermal_events_ops *ops)
+{
+ enabled_ops[THERMAL_GENL_EVENT_TZ_CREATE] = !!ops->tz_create;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_DELETE] = !!ops->tz_delete;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_DISABLE] = !!ops->tz_disable;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_ENABLE] = !!ops->tz_enable;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_UP] = !!ops->trip_high;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_DOWN] = !!ops->trip_low;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_CHANGE] = !!ops->trip_change;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_ADD] = !!ops->trip_add;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_DELETE] = !!ops->trip_delete;
+ enabled_ops[THERMAL_GENL_EVENT_CDEV_ADD] = !!ops->cdev_add;
+ enabled_ops[THERMAL_GENL_EVENT_CDEV_DELETE] = !!ops->cdev_delete;
+ enabled_ops[THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = !!ops->cdev_update;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = !!ops->gov_change;
+}
+
+thermal_error_t thermal_events_handle(struct thermal_handler *th, void *arg)
+{
+ struct thermal_handler_param thp = { .th = th, .arg = arg };
+
+ if (!th)
+ return THERMAL_ERROR;
+
+ if (nl_cb_set(th->cb_event, NL_CB_VALID, NL_CB_CUSTOM,
+ handle_thermal_event, &thp))
+ return THERMAL_ERROR;
+
+ return nl_recvmsgs(th->sk_event, th->cb_event);
+}
+
+int thermal_events_fd(struct thermal_handler *th)
+{
+ if (!th)
+ return -1;
+
+ return nl_socket_get_fd(th->sk_event);
+}
+
+thermal_error_t thermal_events_exit(struct thermal_handler *th)
+{
+ if (nl_unsubscribe_thermal(th->sk_event, th->cb_event,
+ THERMAL_GENL_EVENT_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ nl_thermal_disconnect(th->sk_event, th->cb_event);
+
+ return THERMAL_SUCCESS;
+}
+
+thermal_error_t thermal_events_init(struct thermal_handler *th)
+{
+ thermal_events_ops_init(&th->ops->events);
+
+ if (nl_thermal_connect(&th->sk_event, &th->cb_event))
+ return THERMAL_ERROR;
+
+ if (nl_subscribe_thermal(th->sk_event, th->cb_event,
+ THERMAL_GENL_EVENT_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
diff --git a/tools/lib/thermal/include/thermal.h b/tools/lib/thermal/include/thermal.h
new file mode 100644
index 000000000000..1abc560602cf
--- /dev/null
+++ b/tools/lib/thermal/include/thermal.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __LIBTHERMAL_H
+#define __LIBTHERMAL_H
+
+#include <linux/thermal.h>
+
+#ifndef LIBTHERMAL_API
+#define LIBTHERMAL_API __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct thermal_sampling_ops {
+ int (*tz_temp)(int tz_id, int temp, void *arg);
+};
+
+struct thermal_events_ops {
+ int (*tz_create)(const char *name, int tz_id, void *arg);
+ int (*tz_delete)(int tz_id, void *arg);
+ int (*tz_enable)(int tz_id, void *arg);
+ int (*tz_disable)(int tz_id, void *arg);
+ int (*trip_high)(int tz_id, int trip_id, int temp, void *arg);
+ int (*trip_low)(int tz_id, int trip_id, int temp, void *arg);
+ int (*trip_add)(int tz_id, int trip_id, int type, int temp, int hyst, void *arg);
+ int (*trip_change)(int tz_id, int trip_id, int type, int temp, int hyst, void *arg);
+ int (*trip_delete)(int tz_id, int trip_id, void *arg);
+ int (*cdev_add)(const char *name, int cdev_id, int max_state, void *arg);
+ int (*cdev_delete)(int cdev_id, void *arg);
+ int (*cdev_update)(int cdev_id, int cur_state, void *arg);
+ int (*gov_change)(int tz_id, const char *gov_name, void *arg);
+};
+
+struct thermal_ops {
+ struct thermal_sampling_ops sampling;
+ struct thermal_events_ops events;
+};
+
+struct thermal_trip {
+ int id;
+ int type;
+ int temp;
+ int hyst;
+};
+
+struct thermal_zone {
+ int id;
+ int temp;
+ char name[THERMAL_NAME_LENGTH];
+ char governor[THERMAL_NAME_LENGTH];
+ struct thermal_trip *trip;
+};
+
+struct thermal_cdev {
+ int id;
+ char name[THERMAL_NAME_LENGTH];
+ int max_state;
+ int min_state;
+ int cur_state;
+};
+
+typedef enum {
+ THERMAL_ERROR = -1,
+ THERMAL_SUCCESS = 0,
+} thermal_error_t;
+
+struct thermal_handler;
+
+typedef int (*cb_tz_t)(struct thermal_zone *, void *);
+
+typedef int (*cb_tt_t)(struct thermal_trip *, void *);
+
+typedef int (*cb_tc_t)(struct thermal_cdev *, void *);
+
+LIBTHERMAL_API int for_each_thermal_zone(struct thermal_zone *tz, cb_tz_t cb, void *arg);
+
+LIBTHERMAL_API int for_each_thermal_trip(struct thermal_trip *tt, cb_tt_t cb, void *arg);
+
+LIBTHERMAL_API int for_each_thermal_cdev(struct thermal_cdev *cdev, cb_tc_t cb, void *arg);
+
+LIBTHERMAL_API struct thermal_zone *thermal_zone_find_by_name(struct thermal_zone *tz,
+ const char *name);
+
+LIBTHERMAL_API struct thermal_zone *thermal_zone_find_by_id(struct thermal_zone *tz, int id);
+
+LIBTHERMAL_API struct thermal_zone *thermal_zone_discover(struct thermal_handler *th);
+
+LIBTHERMAL_API struct thermal_handler *thermal_init(struct thermal_ops *ops);
+
+LIBTHERMAL_API void thermal_exit(struct thermal_handler *th);
+
+/*
+ * Netlink thermal events
+ */
+LIBTHERMAL_API thermal_error_t thermal_events_exit(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_events_init(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_events_handle(struct thermal_handler *th, void *arg);
+
+LIBTHERMAL_API int thermal_events_fd(struct thermal_handler *th);
+
+/*
+ * Netlink thermal commands
+ */
+LIBTHERMAL_API thermal_error_t thermal_cmd_exit(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_init(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_tz(struct thermal_handler *th,
+ struct thermal_zone **tz);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_cdev(struct thermal_handler *th,
+ struct thermal_cdev **tc);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_trip(struct thermal_handler *th,
+ struct thermal_zone *tz);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_governor(struct thermal_handler *th,
+ struct thermal_zone *tz);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_temp(struct thermal_handler *th,
+ struct thermal_zone *tz);
+
+/*
+ * Netlink thermal samples
+ */
+LIBTHERMAL_API thermal_error_t thermal_sampling_exit(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_sampling_init(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_sampling_handle(struct thermal_handler *th, void *arg);
+
+LIBTHERMAL_API int thermal_sampling_fd(struct thermal_handler *th);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __LIBTHERMAL_H */
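
Taken together, the declarations above are the library's whole public surface. A minimal
consumer might look like this sketch (hypothetical program: it assumes libthermal and its
libnl3 dependencies are installed and that the kernel exposes the thermal netlink family;
error handling is trimmed):

	#include <stdio.h>
	#include <thermal.h>

	/* Callback invoked by for_each_thermal_zone() for every zone. */
	static int show_tz(struct thermal_zone *tz, void *arg)
	{
		struct thermal_handler *th = arg;

		/* Ask the kernel for this zone's current temperature. */
		if (thermal_cmd_get_temp(th, tz) == THERMAL_SUCCESS)
			printf("%s (id=%d): %d millicelsius, governor '%s'\n",
			       tz->name, tz->id, tz->temp, tz->governor);

		return 0;
	}

	int main(void)
	{
		struct thermal_ops ops = { 0 };	/* no event/sampling callbacks */
		struct thermal_handler *th;
		struct thermal_zone *tz;

		th = thermal_init(&ops);
		if (!th)
			return 1;

		/* Fetch all zones plus their trip points and governors. */
		tz = thermal_zone_discover(th);
		if (tz)
			for_each_thermal_zone(tz, show_tz, th);

		thermal_exit(th);
		return 0;
	}

The thermal_events_fd() and thermal_sampling_fd() accessors exist so the two notification
channels can instead be driven from a poll() loop, calling thermal_events_handle() or
thermal_sampling_handle() when the corresponding descriptor becomes readable.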
diff --git a/tools/lib/thermal/libthermal.map b/tools/lib/thermal/libthermal.map
new file mode 100644
index 000000000000..d5e77738c7a4
--- /dev/null
+++ b/tools/lib/thermal/libthermal.map
@@ -0,0 +1,25 @@
+LIBTHERMAL_0.0.1 {
+ global:
+ thermal_init;
+ for_each_thermal_zone;
+ for_each_thermal_trip;
+ for_each_thermal_cdev;
+ thermal_zone_find_by_name;
+ thermal_zone_find_by_id;
+ thermal_zone_discover;
+ thermal_events_init;
+ thermal_events_handle;
+ thermal_events_fd;
+ thermal_cmd_init;
+ thermal_cmd_get_tz;
+ thermal_cmd_get_cdev;
+ thermal_cmd_get_trip;
+ thermal_cmd_get_governor;
+ thermal_cmd_get_temp;
+ thermal_sampling_init;
+ thermal_sampling_handle;
+ thermal_sampling_fd;
+local:
+ *;
+};
diff --git a/tools/lib/thermal/libthermal.pc.template b/tools/lib/thermal/libthermal.pc.template
new file mode 100644
index 000000000000..6f3769731b59
--- /dev/null
+++ b/tools/lib/thermal/libthermal.pc.template
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+prefix=@PREFIX@
+libdir=@LIBDIR@
+includedir=${prefix}/include
+
+Name: libthermal
+Description: thermal library
+Requires: libnl-3.0 libnl-genl-3.0
+Version: @VERSION@
+Libs: -L${libdir} -lnl-genl-3 -lnl-3
+Cflags: -I${includedir} -I${includedir}/libnl3
diff --git a/tools/lib/thermal/sampling.c b/tools/lib/thermal/sampling.c
new file mode 100644
index 000000000000..ee818f4e9654
--- /dev/null
+++ b/tools/lib/thermal/sampling.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+static int handle_thermal_sample(struct nl_msg *n, void *arg)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(n);
+ struct genlmsghdr *genlhdr = genlmsg_hdr(nlh);
+ struct nlattr *attrs[THERMAL_GENL_ATTR_MAX + 1];
+ struct thermal_handler_param *thp = arg;
+ struct thermal_handler *th = thp->th;
+
+ genlmsg_parse(nlh, 0, attrs, THERMAL_GENL_ATTR_MAX, NULL);
+
+ switch (genlhdr->cmd) {
+
+ case THERMAL_GENL_SAMPLING_TEMP:
+ return th->ops->sampling.tz_temp(
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TEMP]), arg);
+ default:
+ return THERMAL_ERROR;
+ }
+}
+
+thermal_error_t thermal_sampling_handle(struct thermal_handler *th, void *arg)
+{
+ struct thermal_handler_param thp = { .th = th, .arg = arg };
+
+ if (!th)
+ return THERMAL_ERROR;
+
+ if (nl_cb_set(th->cb_sampling, NL_CB_VALID, NL_CB_CUSTOM,
+ handle_thermal_sample, &thp))
+ return THERMAL_ERROR;
+
+ return nl_recvmsgs(th->sk_sampling, th->cb_sampling);
+}
+
+int thermal_sampling_fd(struct thermal_handler *th)
+{
+ if (!th)
+ return -1;
+
+ return nl_socket_get_fd(th->sk_sampling);
+}
+
+thermal_error_t thermal_sampling_exit(struct thermal_handler *th)
+{
+ if (nl_unsubscribe_thermal(th->sk_sampling, th->cb_sampling,
+ THERMAL_GENL_EVENT_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ nl_thermal_disconnect(th->sk_sampling, th->cb_sampling);
+
+ return THERMAL_SUCCESS;
+}
+
+thermal_error_t thermal_sampling_init(struct thermal_handler *th)
+{
+ if (nl_thermal_connect(&th->sk_sampling, &th->cb_sampling))
+ return THERMAL_ERROR;
+
+ if (nl_subscribe_thermal(th->sk_sampling, th->cb_sampling,
+ THERMAL_GENL_SAMPLING_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
diff --git a/tools/lib/thermal/thermal.c b/tools/lib/thermal/thermal.c
new file mode 100644
index 000000000000..72a76dc205bc
--- /dev/null
+++ b/tools/lib/thermal/thermal.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <stdio.h>
+#include <thermal.h>
+
+#include "thermal_nl.h"
+
+int for_each_thermal_cdev(struct thermal_cdev *cdev, cb_tc_t cb, void *arg)
+{
+ int i, ret = 0;
+
+ if (!cdev)
+ return 0;
+
+ for (i = 0; cdev[i].id != -1; i++)
+ ret |= cb(&cdev[i], arg);
+
+ return ret;
+}
+
+int for_each_thermal_trip(struct thermal_trip *tt, cb_tt_t cb, void *arg)
+{
+ int i, ret = 0;
+
+ if (!tt)
+ return 0;
+
+ for (i = 0; tt[i].id != -1; i++)
+ ret |= cb(&tt[i], arg);
+
+ return ret;
+}
+
+int for_each_thermal_zone(struct thermal_zone *tz, cb_tz_t cb, void *arg)
+{
+ int i, ret = 0;
+
+ if (!tz)
+ return 0;
+
+ for (i = 0; tz[i].id != -1; i++)
+ ret |= cb(&tz[i], arg);
+
+ return ret;
+}
+
+struct thermal_zone *thermal_zone_find_by_name(struct thermal_zone *tz,
+ const char *name)
+{
+ int i;
+
+ if (!tz || !name)
+ return NULL;
+
+ for (i = 0; tz[i].id != -1; i++) {
+ if (!strcmp(tz[i].name, name))
+ return &tz[i];
+ }
+
+ return NULL;
+}
+
+struct thermal_zone *thermal_zone_find_by_id(struct thermal_zone *tz, int id)
+{
+ int i;
+
+ if (!tz || id < 0)
+ return NULL;
+
+ for (i = 0; tz[i].id != -1; i++) {
+ if (tz[i].id == id)
+ return &tz[i];
+ }
+
+ return NULL;
+}
+
+static int __thermal_zone_discover(struct thermal_zone *tz, void *th)
+{
+ if (thermal_cmd_get_trip(th, tz) < 0)
+ return -1;
+
+ if (thermal_cmd_get_governor(th, tz))
+ return -1;
+
+ return 0;
+}
+
+struct thermal_zone *thermal_zone_discover(struct thermal_handler *th)
+{
+ struct thermal_zone *tz;
+
+ if (thermal_cmd_get_tz(th, &tz) < 0)
+ return NULL;
+
+ if (for_each_thermal_zone(tz, __thermal_zone_discover, th))
+ return NULL;
+
+ return tz;
+}
+
+void thermal_exit(struct thermal_handler *th)
+{
+ thermal_cmd_exit(th);
+ thermal_events_exit(th);
+ thermal_sampling_exit(th);
+
+ free(th);
+}
+
+struct thermal_handler *thermal_init(struct thermal_ops *ops)
+{
+ struct thermal_handler *th;
+
+ th = malloc(sizeof(*th));
+ if (!th)
+ return NULL;
+ th->ops = ops;
+
+ if (thermal_events_init(th))
+ goto out_free;
+
+ if (thermal_sampling_init(th))
+ goto out_free;
+
+ if (thermal_cmd_init(th))
+ goto out_free;
+
+ return th;
+
+out_free:
+ free(th);
+
+ return NULL;
+}
diff --git a/tools/lib/thermal/thermal_nl.c b/tools/lib/thermal/thermal_nl.c
new file mode 100644
index 000000000000..b05cf9569858
--- /dev/null
+++ b/tools/lib/thermal/thermal_nl.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+struct handler_args {
+ const char *group;
+ int id;
+};
+
+static __thread int err;
+static __thread int done;
+
+static int nl_seq_check_handler(struct nl_msg *msg, void *arg)
+{
+ return NL_OK;
+}
+
+static int nl_error_handler(struct sockaddr_nl *nla, struct nlmsgerr *nl_err,
+ void *arg)
+{
+ int *ret = arg;
+
+ if (ret)
+ *ret = nl_err->error;
+
+ return NL_STOP;
+}
+
+static int nl_finish_handler(struct nl_msg *msg, void *arg)
+{
+ int *ret = arg;
+
+ if (ret)
+ *ret = 1;
+
+ return NL_OK;
+}
+
+static int nl_ack_handler(struct nl_msg *msg, void *arg)
+{
+ int *ret = arg;
+
+ if (ret)
+ *ret = 1;
+
+ return NL_OK;
+}
+
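+/*
+ * Send a netlink message and receive replies until the exchange is
+ * complete: nl_error_handler() stores any failure in 'err', while the
+ * finish/ack handlers set 'done' once the kernel has nothing more to send.
+ */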
+int nl_send_msg(struct nl_sock *sock, struct nl_cb *cb, struct nl_msg *msg,
+ int (*rx_handler)(struct nl_msg *, void *), void *data)
+{
+ if (!rx_handler)
+ return THERMAL_ERROR;
+
+ err = nl_send_auto_complete(sock, msg);
+ if (err < 0)
+ return err;
+
+ nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, rx_handler, data);
+
+ err = done = 0;
+
+ while (err == 0 && done == 0)
+ nl_recvmsgs(sock, cb);
+
+ return err;
+}
+
+static int nl_family_handler(struct nl_msg *msg, void *arg)
+{
+ struct handler_args *grp = arg;
+ struct nlattr *tb[CTRL_ATTR_MAX + 1];
+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+ struct nlattr *mcgrp;
+ int rem_mcgrp;
+
+ nla_parse(tb, CTRL_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
+ genlmsg_attrlen(gnlh, 0), NULL);
+
+ if (!tb[CTRL_ATTR_MCAST_GROUPS])
+ return THERMAL_ERROR;
+
+ nla_for_each_nested(mcgrp, tb[CTRL_ATTR_MCAST_GROUPS], rem_mcgrp) {
+
+ struct nlattr *tb_mcgrp[CTRL_ATTR_MCAST_GRP_MAX + 1];
+
+ nla_parse(tb_mcgrp, CTRL_ATTR_MCAST_GRP_MAX,
+ nla_data(mcgrp), nla_len(mcgrp), NULL);
+
+ if (!tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME] ||
+ !tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID])
+ continue;
+
+ if (strncmp(nla_data(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME]),
+ grp->group,
+ nla_len(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME])))
+ continue;
+
+ grp->id = nla_get_u32(tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID]);
+
+ break;
+ }
+
+ return THERMAL_SUCCESS;
+}
+
+static int nl_get_multicast_id(struct nl_sock *sock, struct nl_cb *cb,
+ const char *family, const char *group)
+{
+ struct nl_msg *msg;
+ int ret = 0, ctrlid;
+ struct handler_args grp = {
+ .group = group,
+ .id = -ENOENT,
+ };
+
+ msg = nlmsg_alloc();
+ if (!msg)
+ return THERMAL_ERROR;
+
+ ctrlid = genl_ctrl_resolve(sock, "nlctrl");
+
+ genlmsg_put(msg, 0, 0, ctrlid, 0, 0, CTRL_CMD_GETFAMILY, 0);
+
+ nla_put_string(msg, CTRL_ATTR_FAMILY_NAME, family);
+
+ ret = nl_send_msg(sock, cb, msg, nl_family_handler, &grp);
+ if (ret)
+ goto nla_put_failure;
+
+ ret = grp.id;
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return ret;
+}
+
+int nl_thermal_connect(struct nl_sock **nl_sock, struct nl_cb **nl_cb)
+{
+ struct nl_cb *cb;
+ struct nl_sock *sock;
+
+ cb = nl_cb_alloc(NL_CB_DEFAULT);
+ if (!cb)
+ return THERMAL_ERROR;
+
+ sock = nl_socket_alloc();
+ if (!sock)
+ goto out_cb_free;
+
+ if (genl_connect(sock))
+ goto out_socket_free;
+
+	if (nl_cb_err(cb, NL_CB_CUSTOM, nl_error_handler, &err) ||
+	    nl_cb_set(cb, NL_CB_FINISH, NL_CB_CUSTOM, nl_finish_handler, &done) ||
+	    nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, nl_ack_handler, &done) ||
+	    nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, nl_seq_check_handler, &done))
+		goto out_socket_free;
+
+ *nl_sock = sock;
+ *nl_cb = cb;
+
+ return THERMAL_SUCCESS;
+
+out_socket_free:
+ nl_socket_free(sock);
+out_cb_free:
+ nl_cb_put(cb);
+ return THERMAL_ERROR;
+}
+
+void nl_thermal_disconnect(struct nl_sock *nl_sock, struct nl_cb *nl_cb)
+{
+ nl_close(nl_sock);
+ nl_socket_free(nl_sock);
+ nl_cb_put(nl_cb);
+}
+
+int nl_unsubscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group)
+{
+ int mcid;
+
+ mcid = nl_get_multicast_id(nl_sock, nl_cb, THERMAL_GENL_FAMILY_NAME,
+ group);
+ if (mcid < 0)
+ return THERMAL_ERROR;
+
+ if (nl_socket_drop_membership(nl_sock, mcid))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
+
+int nl_subscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group)
+{
+ int mcid;
+
+ mcid = nl_get_multicast_id(nl_sock, nl_cb, THERMAL_GENL_FAMILY_NAME,
+ group);
+ if (mcid < 0)
+ return THERMAL_ERROR;
+
+ if (nl_socket_add_membership(nl_sock, mcid))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
diff --git a/tools/lib/thermal/thermal_nl.h b/tools/lib/thermal/thermal_nl.h
new file mode 100644
index 000000000000..ddf635642f07
--- /dev/null
+++ b/tools/lib/thermal/thermal_nl.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __THERMAL_H
+#define __THERMAL_H
+
+#include <netlink/netlink.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/mngt.h>
+#include <netlink/genl/ctrl.h>
+
+struct thermal_handler {
+ int done;
+ int error;
+ struct thermal_ops *ops;
+ struct nl_msg *msg;
+ struct nl_sock *sk_event;
+ struct nl_sock *sk_sampling;
+ struct nl_sock *sk_cmd;
+ struct nl_cb *cb_cmd;
+ struct nl_cb *cb_event;
+ struct nl_cb *cb_sampling;
+};
+
+struct thermal_handler_param {
+ struct thermal_handler *th;
+ void *arg;
+};
+
+/*
+ * Low level netlink
+ */
+extern int nl_subscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group);
+
+extern int nl_unsubscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group);
+
+extern int nl_thermal_connect(struct nl_sock **nl_sock, struct nl_cb **nl_cb);
+
+extern void nl_thermal_disconnect(struct nl_sock *nl_sock, struct nl_cb *nl_cb);
+
+extern int nl_send_msg(struct nl_sock *sock, struct nl_cb *nl_cb, struct nl_msg *msg,
+ int (*rx_handler)(struct nl_msg *, void *),
+ void *data);
+
+#endif /* __THERMAL_H */
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index e66d717c245d..a3a9cc24e0e3 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -19,8 +19,8 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
OBJTOOL := $(OUTPUT)objtool
OBJTOOL_IN := $(OBJTOOL)-in.o
-LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
-LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
all: $(OBJTOOL)
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 8b990a52aada..c260006106be 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -787,3 +787,8 @@ bool arch_is_retpoline(struct symbol *sym)
{
return !strncmp(sym->name, "__x86_indirect_", 15);
}
+
+bool arch_is_rethunk(struct symbol *sym)
+{
+ return !strcmp(sym->name, "__x86_return_thunk");
+}
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index f4c3a5091737..24fbe803a0d3 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -68,6 +68,8 @@ const struct option check_options[] = {
OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
OPT_BOOLEAN('o', "orc", &opts.orc, "generate ORC metadata"),
OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
+ OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
+ OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"),
OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"),
OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"),
OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"),
@@ -123,6 +125,7 @@ static bool opts_valid(void)
opts.noinstr ||
opts.orc ||
opts.retpoline ||
+ opts.rethunk ||
opts.sls ||
opts.stackval ||
opts.static_call ||
@@ -135,6 +138,11 @@ static bool opts_valid(void)
return true;
}
+ if (opts.unret && !opts.rethunk) {
+ ERROR("--unret requires --rethunk");
+ return false;
+ }
+
if (opts.dump_orc)
return true;
@@ -163,6 +171,11 @@ static bool link_opts_valid(struct objtool_file *file)
return false;
}
+ if (opts.unret) {
+ ERROR("--unret requires --link");
+ return false;
+ }
+
return true;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 190b2f6e360a..0cec74da7ffe 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -185,7 +185,9 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"do_group_exit",
"stop_this_cpu",
"__invalid_creds",
- "cpu_startup_entry",
+ "cpu_startup_entry",
+ "__ubsan_handle_builtin_unreachable",
+ "ex_handler_msr_mce",
};
if (!func)
@@ -374,7 +376,8 @@ static int decode_instructions(struct objtool_file *file)
sec->text = true;
if (!strcmp(sec->name, ".noinstr.text") ||
- !strcmp(sec->name, ".entry.text"))
+ !strcmp(sec->name, ".entry.text") ||
+ !strncmp(sec->name, ".text.__x86.", 12))
sec->noinstr = true;
for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
@@ -747,6 +750,52 @@ static int create_retpoline_sites_sections(struct objtool_file *file)
return 0;
}
+static int create_return_sites_sections(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct section *sec;
+ int idx;
+
+ sec = find_section_by_name(file->elf, ".return_sites");
+ if (sec) {
+ WARN("file already has .return_sites, skipping");
+ return 0;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->return_thunk_list, call_node)
+ idx++;
+
+ if (!idx)
+ return 0;
+
+ sec = elf_create_section(file->elf, ".return_sites", 0,
+ sizeof(int), idx);
+ if (!sec) {
+ WARN("elf_create_section: .return_sites");
+ return -1;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->return_thunk_list, call_node) {
+
+ int *site = (int *)sec->data->d_buf + idx;
+ *site = 0;
+
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(int),
+ R_X86_64_PC32,
+ insn->sec, insn->offset)) {
+ WARN("elf_add_reloc_to_insn: .return_sites");
+ return -1;
+ }
+
+ idx++;
+ }
+
+ return 0;
+}
+
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
struct instruction *insn;
@@ -1081,6 +1130,11 @@ __weak bool arch_is_retpoline(struct symbol *sym)
return false;
}
+__weak bool arch_is_rethunk(struct symbol *sym)
+{
+ return false;
+}
+
#define NEGATIVE_RELOC ((void *)-1L)
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
@@ -1248,6 +1302,19 @@ static void add_retpoline_call(struct objtool_file *file, struct instruction *in
annotate_call_site(file, insn, false);
}
+static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
+{
+ /*
+ * Return thunk tail calls are really just returns in disguise,
+ * so convert them accordingly.
+ */
+ insn->type = INSN_RETURN;
+ insn->retpoline_safe = true;
+
+ if (add)
+ list_add_tail(&insn->call_node, &file->return_thunk_list);
+}
+
static bool same_function(struct instruction *insn1, struct instruction *insn2)
{
return insn1->func->pfunc == insn2->func->pfunc;
@@ -1300,6 +1367,9 @@ static int add_jump_destinations(struct objtool_file *file)
} else if (reloc->sym->retpoline_thunk) {
add_retpoline_call(file, insn);
continue;
+ } else if (reloc->sym->return_thunk) {
+ add_return_call(file, insn, true);
+ continue;
} else if (insn->func) {
/*
* External sibling call or internal sibling call with
@@ -1318,6 +1388,21 @@ static int add_jump_destinations(struct objtool_file *file)
jump_dest = find_insn(file, dest_sec, dest_off);
if (!jump_dest) {
+ struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
+
+ /*
+ * This is a special case for zen_untrain_ret().
+ * It jumps to __x86_return_thunk(), but objtool
+ * can't find the thunk's starting RET
+ * instruction, because the RET is also in the
+ * middle of another instruction. Objtool only
+ * knows about the outer instruction.
+ */
+ if (sym && sym->return_thunk) {
+ add_return_call(file, insn, false);
+ continue;
+ }
+
WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
insn->sec, insn->offset, dest_sec->name,
dest_off);
@@ -1947,16 +2032,35 @@ static int read_unwind_hints(struct objtool_file *file)
insn->hint = true;
- if (opts.ibt && hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
+ if (hint->type == UNWIND_HINT_TYPE_SAVE) {
+ insn->hint = false;
+ insn->save = true;
+ continue;
+ }
+
+ if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
+ insn->restore = true;
+ continue;
+ }
+
+ if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
- if (sym && sym->bind == STB_GLOBAL &&
- insn->type != INSN_ENDBR && !insn->noendbr) {
- WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
- insn->sec, insn->offset);
+ if (sym && sym->bind == STB_GLOBAL) {
+ if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
+ WARN_FUNC("UNWIND_HINT_IRET_REGS without ENDBR",
+ insn->sec, insn->offset);
+ }
+
+ insn->entry = 1;
}
}
+ if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
+ hint->type = UNWIND_HINT_TYPE_CALL;
+ insn->entry = 1;
+ }
+
if (hint->type == UNWIND_HINT_TYPE_FUNC) {
insn->cfi = &func_cfi;
continue;
@@ -2030,8 +2134,10 @@ static int read_retpoline_hints(struct objtool_file *file)
}
if (insn->type != INSN_JUMP_DYNAMIC &&
- insn->type != INSN_CALL_DYNAMIC) {
- WARN_FUNC("retpoline_safe hint not an indirect jump/call",
+ insn->type != INSN_CALL_DYNAMIC &&
+ insn->type != INSN_RETURN &&
+ insn->type != INSN_NOP) {
+ WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
insn->sec, insn->offset);
return -1;
}
@@ -2182,6 +2288,9 @@ static int classify_symbols(struct objtool_file *file)
if (arch_is_retpoline(func))
func->retpoline_thunk = true;
+ if (arch_is_rethunk(func))
+ func->return_thunk = true;
+
if (!strcmp(func->name, "__fentry__"))
func->fentry = true;
@@ -3188,7 +3297,7 @@ static struct instruction *next_insn_to_validate(struct objtool_file *file,
* Follow the branch starting at the given instruction, and recursively follow
* any other branches (jumps). Meanwhile, track the frame pointer state at
* each instruction and validate all the rules described in
- * tools/objtool/Documentation/stack-validation.txt.
+ * tools/objtool/Documentation/objtool.txt.
*/
static int validate_branch(struct objtool_file *file, struct symbol *func,
struct instruction *insn, struct insn_state state)
@@ -3216,8 +3325,8 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 1;
}
- visited = 1 << state.uaccess;
- if (insn->visited) {
+ visited = VISITED_BRANCH << state.uaccess;
+ if (insn->visited & VISITED_BRANCH_MASK) {
if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
return 1;
@@ -3231,6 +3340,35 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
state.instr += insn->instr;
if (insn->hint) {
+ if (insn->restore) {
+ struct instruction *save_insn, *i;
+
+ i = insn;
+ save_insn = NULL;
+
+ sym_for_each_insn_continue_reverse(file, func, i) {
+ if (i->save) {
+ save_insn = i;
+ break;
+ }
+ }
+
+ if (!save_insn) {
+ WARN_FUNC("no corresponding CFI save for CFI restore",
+ sec, insn->offset);
+ return 1;
+ }
+
+ if (!save_insn->visited) {
+ WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
+ sec, insn->offset);
+ return 1;
+ }
+
+ insn->cfi = save_insn->cfi;
+ nr_cfi_reused++;
+ }
+
state.cfi = *insn->cfi;
} else {
/* XXX track if we actually changed state.cfi */
@@ -3431,6 +3569,145 @@ static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
return warnings;
}
+/*
+ * Validate rethunk entry constraint: must untrain RET before the first RET.
+ *
+ * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
+ * before an actual RET instruction.
+ */
+static int validate_entry(struct objtool_file *file, struct instruction *insn)
+{
+ struct instruction *next, *dest;
+ int ret, warnings = 0;
+
+ for (;;) {
+ next = next_insn_to_validate(file, insn);
+
+ if (insn->visited & VISITED_ENTRY)
+ return 0;
+
+ insn->visited |= VISITED_ENTRY;
+
+ if (!insn->ignore_alts && !list_empty(&insn->alts)) {
+ struct alternative *alt;
+ bool skip_orig = false;
+
+ list_for_each_entry(alt, &insn->alts, list) {
+ if (alt->skip_orig)
+ skip_orig = true;
+
+ ret = validate_entry(file, alt->insn);
+ if (ret) {
+ if (opts.backtrace)
+ BT_FUNC("(alt)", insn);
+ return ret;
+ }
+ }
+
+ if (skip_orig)
+ return 0;
+ }
+
+ switch (insn->type) {
+
+ case INSN_CALL_DYNAMIC:
+ case INSN_JUMP_DYNAMIC:
+ case INSN_JUMP_DYNAMIC_CONDITIONAL:
+ WARN_FUNC("early indirect call", insn->sec, insn->offset);
+ return 1;
+
+ case INSN_JUMP_UNCONDITIONAL:
+ case INSN_JUMP_CONDITIONAL:
+ if (!is_sibling_call(insn)) {
+ if (!insn->jump_dest) {
+ WARN_FUNC("unresolved jump target after linking?!?",
+ insn->sec, insn->offset);
+ return -1;
+ }
+ ret = validate_entry(file, insn->jump_dest);
+ if (ret) {
+ if (opts.backtrace) {
+ BT_FUNC("(branch%s)", insn,
+ insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
+ }
+ return ret;
+ }
+
+ if (insn->type == INSN_JUMP_UNCONDITIONAL)
+ return 0;
+
+ break;
+ }
+
+ /* fallthrough */
+ case INSN_CALL:
+ dest = find_insn(file, insn->call_dest->sec,
+ insn->call_dest->offset);
+ if (!dest) {
+ WARN("Unresolved function after linking!?: %s",
+ insn->call_dest->name);
+ return -1;
+ }
+
+ ret = validate_entry(file, dest);
+ if (ret) {
+ if (opts.backtrace)
+ BT_FUNC("(call)", insn);
+ return ret;
+ }
+ /*
+ * If a call returns without error, it must have seen UNTRAIN_RET.
+ * Therefore any non-error return is a success.
+ */
+ return 0;
+
+ case INSN_RETURN:
+ WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
+ return 1;
+
+ case INSN_NOP:
+ if (insn->retpoline_safe)
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+
+ if (!next) {
+			WARN_FUNC("the end!", insn->sec, insn->offset);
+ return -1;
+ }
+ insn = next;
+ }
+
+ return warnings;
+}
+
+/*
+ * Validate that all branches starting at 'insn->entry' encounter UNRET_END
+ * before RET.
+ */
+static int validate_unret(struct objtool_file *file)
+{
+ struct instruction *insn;
+ int ret, warnings = 0;
+
+ for_each_insn(file, insn) {
+ if (!insn->entry)
+ continue;
+
+ ret = validate_entry(file, insn);
+ if (ret < 0) {
+ WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
+ return ret;
+ }
+ warnings += ret;
+ }
+
+ return warnings;
+}
+
static int validate_retpoline(struct objtool_file *file)
{
struct instruction *insn;
@@ -3438,7 +3715,8 @@ static int validate_retpoline(struct objtool_file *file)
for_each_insn(file, insn) {
if (insn->type != INSN_JUMP_DYNAMIC &&
- insn->type != INSN_CALL_DYNAMIC)
+ insn->type != INSN_CALL_DYNAMIC &&
+ insn->type != INSN_RETURN)
continue;
if (insn->retpoline_safe)
@@ -3453,9 +3731,17 @@ static int validate_retpoline(struct objtool_file *file)
if (!strcmp(insn->sec->name, ".init.text") && !opts.module)
continue;
- WARN_FUNC("indirect %s found in RETPOLINE build",
- insn->sec, insn->offset,
- insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+ if (insn->type == INSN_RETURN) {
+ if (opts.rethunk) {
+ WARN_FUNC("'naked' return found in RETHUNK build",
+ insn->sec, insn->offset);
+ } else
+ continue;
+ } else {
+ WARN_FUNC("indirect %s found in RETPOLINE build",
+ insn->sec, insn->offset,
+ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+ }
warnings++;
}
@@ -3824,8 +4110,7 @@ static int validate_ibt(struct objtool_file *file)
!strcmp(sec->name, "__bug_table") ||
!strcmp(sec->name, "__ex_table") ||
!strcmp(sec->name, "__jump_table") ||
- !strcmp(sec->name, "__mcount_loc") ||
- !strcmp(sec->name, "__tracepoints"))
+ !strcmp(sec->name, "__mcount_loc"))
continue;
list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
@@ -3944,6 +4229,17 @@ int check(struct objtool_file *file)
warnings += ret;
}
+ if (opts.unret) {
+ /*
+ * Must be after validate_branch() and friends, it plays
+ * further games with insn->visited.
+ */
+ ret = validate_unret(file);
+ if (ret < 0)
+ return ret;
+ warnings += ret;
+ }
+
if (opts.ibt) {
ret = validate_ibt(file);
if (ret < 0)
@@ -3972,6 +4268,13 @@ int check(struct objtool_file *file)
warnings += ret;
}
+ if (opts.rethunk) {
+ ret = create_return_sites_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
if (opts.mcount) {
ret = create_mcount_loc_sections(file);
if (ret < 0)
diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
index 9b19cc304195..beb2f3aa94ff 100644
--- a/tools/objtool/include/objtool/arch.h
+++ b/tools/objtool/include/objtool/arch.h
@@ -89,6 +89,7 @@ const char *arch_ret_insn(int len);
int arch_decode_hint_reg(u8 sp_reg, int *base);
bool arch_is_retpoline(struct symbol *sym);
+bool arch_is_rethunk(struct symbol *sym);
int arch_rewrite_retpolines(struct objtool_file *file);
diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h
index 280ea18b7f2b..42a52f1a0add 100644
--- a/tools/objtool/include/objtool/builtin.h
+++ b/tools/objtool/include/objtool/builtin.h
@@ -19,6 +19,8 @@ struct opts {
bool noinstr;
bool orc;
bool retpoline;
+ bool rethunk;
+ bool unret;
bool sls;
bool stackval;
bool static_call;
diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h
index f10d7374f388..036129cebeee 100644
--- a/tools/objtool/include/objtool/check.h
+++ b/tools/objtool/include/objtool/check.h
@@ -46,16 +46,19 @@ struct instruction {
enum insn_type type;
unsigned long immediate;
- u8 dead_end : 1,
- ignore : 1,
- ignore_alts : 1,
- hint : 1,
- retpoline_safe : 1,
- noendbr : 1;
- /* 2 bit hole */
+ u16 dead_end : 1,
+ ignore : 1,
+ ignore_alts : 1,
+ hint : 1,
+ save : 1,
+ restore : 1,
+ retpoline_safe : 1,
+ noendbr : 1,
+ entry : 1;
+ /* 7 bit hole */
+
s8 instr;
u8 visited;
- /* u8 hole */
struct alt_group *alt_group;
struct symbol *call_dest;
@@ -69,6 +72,11 @@ struct instruction {
struct cfi_state *cfi;
};
+#define VISITED_BRANCH 0x01
+#define VISITED_BRANCH_UACCESS 0x02
+#define VISITED_BRANCH_MASK 0x03
+#define VISITED_ENTRY 0x04
+
static inline bool is_static_jump(struct instruction *insn)
{
return insn->type == INSN_JUMP_CONDITIONAL ||
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index adebfbc2b518..16f4067b82ae 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -57,6 +57,7 @@ struct symbol {
u8 uaccess_safe : 1;
u8 static_call_tramp : 1;
u8 retpoline_thunk : 1;
+ u8 return_thunk : 1;
u8 fentry : 1;
u8 profiling_func : 1;
struct list_head pv_target;
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index a6e72d916807..7f2d1b095333 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -24,6 +24,7 @@ struct objtool_file {
struct list_head insn_list;
DECLARE_HASHTABLE(insn_hash, 20);
struct list_head retpoline_call_list;
+ struct list_head return_thunk_list;
struct list_head static_call_list;
struct list_head mcount_loc_list;
struct list_head endbr_list;
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 512669ce064c..a7ecc32e3512 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -102,6 +102,7 @@ struct objtool_file *objtool_open_read(const char *_objname)
INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash);
INIT_LIST_HEAD(&file.retpoline_call_list);
+ INIT_LIST_HEAD(&file.return_thunk_list);
INIT_LIST_HEAD(&file.static_call_list);
INIT_LIST_HEAD(&file.mcount_loc_list);
INIT_LIST_HEAD(&file.endbr_list);
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 20b8ab984d5f..4b9c71faa01a 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -19,7 +19,6 @@ perf.data
perf.data.old
output.svg
perf-archive
-perf-with-kcore
perf-iostat
tags
TAGS
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 33c2521cba4a..18fcc52809fb 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -147,6 +147,11 @@ include::itrace.txt[]
The period/hits keywords set the base the percentage is computed
on - the samples period or the number of samples (hits).
+--percent-limit::
+ Do not show functions which have an overhead under that percent on
+ stdio or stdio2 (Default: 0). Note that this is about selection of
+ functions to display, not about lines within the function.
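+
+	For example, 'perf annotate --stdio --percent-limit 5' (a sketch)
+	hides functions whose overhead is below 5 percent.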
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-arm-spe.txt b/tools/perf/Documentation/perf-arm-spe.txt
new file mode 100644
index 000000000000..bf03222e9a68
--- /dev/null
+++ b/tools/perf/Documentation/perf-arm-spe.txt
@@ -0,0 +1,218 @@
+perf-arm-spe(1)
+===============
+
+NAME
+----
+perf-arm-spe - Support for Arm Statistical Profiling Extension within Perf tools
+
+SYNOPSIS
+--------
+[verse]
+'perf record' -e arm_spe//
+
+DESCRIPTION
+-----------
+
+The SPE (Statistical Profiling Extension) feature provides accurate attribution of latencies and
+events down to individual instructions. Rather than being interrupt-driven, it picks an
+instruction to sample and then captures data for it during execution. Data includes execution time
+in cycles. For loads and stores it also includes data address, cache miss events, and data origin.
+
+The sampling has 5 stages:
+
+ 1. Choose an operation
+ 2. Collect data about the operation
+ 3. Optionally discard the record based on a filter
+ 4. Write the record to memory
+ 5. Interrupt when the buffer is full
+
+Choose an operation
+~~~~~~~~~~~~~~~~~~~
+
+This is chosen from a sample population; for SPE this is an IMPLEMENTATION DEFINED choice of either
+all architectural instructions or all micro-ops. Sampling happens at a programmable interval. The
+architecture provides a mechanism for the SPE driver to infer the minimum interval at which it should
+sample. This minimum interval is used by the driver if no interval is specified. A pseudo-random
+perturbation is also added to the sampling interval by default.
+
+Collect data about the operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Program counter, PMU events, timings and data addresses related to the operation are recorded.
+Sampling ensures that only one sampled operation is in flight at a time.
+
+Optionally discard the record based on a filter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Based on programmable criteria, choose whether to keep the record or discard it. If the record is
+discarded then the flow stops here for this sample.
+
+Write the record to memory
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The record is appended to a memory buffer.
+
+Interrupt when the buffer is full
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the buffer fills, an interrupt is sent and the driver signals Perf to collect the records.
+Perf saves the raw data in the perf.data file.
+
+Opening the file
+----------------
+
+Up until this point, no decoding of the SPE data has been done by either the kernel or Perf. Only when the
+recorded file is opened with 'perf report' or 'perf script' does the decoding happen. When decoding
+the data, Perf generates "synthetic samples" as if these were generated at the time of the
+recording. These samples are the same as if normal sampling was done by Perf without using SPE,
+although they may have more attributes associated with them. For example a normal sample may have
+just the instruction pointer, but an SPE sample can have data addresses and latency attributes.
+
+Why Sampling?
+-------------
+
+ - Sampling, rather than tracing, cuts down the profiling problem to something more manageable for
+ hardware. Only one sampled operation is in flight at a time.
+
+ - Allows precise attribution data, including: Full PC of instruction, data virtual and physical
+ addresses.
+
+ - Allows correlation between an instruction and events, such as TLB and cache miss. (Data source
+ indicates which particular cache was hit, but the meaning is implementation defined because
+ different implementations can have different cache configurations.)
+
+However, SPE does not provide any call-graph information, and relies on statistical methods.
+
+Collisions
+----------
+
+When an operation is sampled while a previous sampled operation has not finished, a collision
+occurs. The new sample is dropped. Collisions affect the integrity of the data, so the sample rate
+should be set to avoid collisions.
+
+The 'sample_collision' PMU event can be used to determine the number of lost samples. However,
+this count is based on collisions _before_ filtering occurs, so it cannot serve as an exact count
+of samples dropped that would have made it through the filter; it is only a rough guide.
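+
+As a hedged illustration, a rough loss fraction could be computed as follows (the helper
+and the samples_written count are illustrative, not part of perf):
+
+    #include <stdint.h>
+
+    /* Fraction of sampled operations lost to collisions, counting
+     * collisions against all samples attempted (written + collided).
+     * Only a rough bound, since collisions are counted before the
+     * filter is applied. */
+    double spe_collision_fraction(uint64_t sample_collision,
+                                  uint64_t samples_written)
+    {
+            uint64_t attempted = sample_collision + samples_written;
+
+            return attempted ? (double)sample_collision / attempted : 0.0;
+    }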
+
+The effect of microarchitectural sampling
+-----------------------------------------
+
+If an implementation samples micro-operations instead of instructions, the results of sampling must
+be weighted accordingly.
+
+For example, if a given instruction A is always converted into two micro-operations, A0 and A1, it
+becomes twice as likely to appear in the sample population.
+
+The coarse effect of conversions, and, if applicable, sampling of speculative operations, can be
+estimated from the 'sample_pop' and 'inst_retired' PMU events.
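+
+A hedged sketch of that estimate (names are illustrative; it assumes 'sample_pop' counts
+sampled micro-ops and 'inst_retired' counts retired instructions over the same window):
+
+    #include <stdint.h>
+
+    /* Combined conversion/speculation factor: micro-ops sampled per
+     * retired instruction. Dividing micro-op sample weights by this
+     * factor makes them comparable to instruction-level counts. */
+    double spe_uops_per_instruction(uint64_t sample_pop, uint64_t inst_retired)
+    {
+            return inst_retired ? (double)sample_pop / inst_retired : 0.0;
+    }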
+
+Kernel Requirements
+-------------------
+
+The ARM_SPE_PMU config must be set to build as either a module or statically.
+
+Depending on the CPU model, the kernel may need to be booted with page table isolation disabled
+(kpti=off). If KPTI needs to be disabled but is not, recording will fail with the console message
+"profiling buffer inaccessible. Try passing 'kpti=off' on the kernel command line".
+
+Capturing SPE with perf command-line tools
+------------------------------------------
+
+You can record a session with SPE samples:
+
+ perf record -e arm_spe// -- ./mybench
+
+The sample period is set with the -c option; because the minimum interval is used by default,
+it is recommended to set a higher value. The value is written to PMSIRR.INTERVAL.
+
+Config parameters
+~~~~~~~~~~~~~~~~~
+
+These are placed between the // in the event specification, comma separated. For example:
+'-e arm_spe/load_filter=1,min_latency=10/'
+
+ branch_filter=1 - collect branches only (PMSFCR.B)
+ event_filter=<mask> - filter on specific events (PMSEVFR) - see bitfield description below
+ jitter=1 - use jitter to avoid resonance when sampling (PMSIRR.RND)
+ load_filter=1 - collect loads only (PMSFCR.LD)
+ min_latency=<n> - collect only samples with this latency or higher* (PMSLATFR)
+ pa_enable=1 - collect physical address (as well as VA) of loads/stores (PMSCR.PA) - requires privilege
+ pct_enable=1 - collect physical timestamp instead of virtual timestamp (PMSCR.PCT) - requires privilege
+ store_filter=1 - collect stores only (PMSFCR.ST)
+ ts_enable=1 - enable timestamping with value of generic timer (PMSCR.TS)
+
++++*+++ Latency is the total latency from the point at which sampling started on that instruction, rather
+than only the execution latency.
+
+Only some events can be filtered on; these include:
+
+ bit 1 - instruction retired (i.e. omit speculative instructions)
+ bit 3 - L1D refill
+ bit 5 - TLB refill
+ bit 7 - mispredict
+ bit 11 - misaligned access
+
+So to sample just retired instructions:
+
+ perf record -e arm_spe/event_filter=2/ -- ./mybench
+
+or just mispredicted branches:
+
+ perf record -e arm_spe/event_filter=0x80/ -- ./mybench
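+
+Filter bits can be combined by ORing them together. A minimal sketch (the helper is
+illustrative, not part of perf):
+
+    #include <stdint.h>
+
+    /* event_filter mask from the bits listed above: instruction
+     * retired (bit 1) plus mispredict (bit 7) gives 0x82. */
+    static inline uint64_t spe_filter_retired_mispredicts(void)
+    {
+            return (UINT64_C(1) << 1) | (UINT64_C(1) << 7);
+    }
+
+which corresponds to:
+
+    perf record -e arm_spe/event_filter=0x82/ -- ./mybench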
+
+Viewing the data
+~~~~~~~~~~~~~~~~
+
+By default perf report and perf script will assign samples to separate groups depending on the
+attributes/events of the SPE record. Because instructions can have multiple events associated with
+them, the samples in these groups are not necessarily unique. For example perf report shows these
+groups:
+
+ Available samples
+ 0 arm_spe//
+ 0 dummy:u
+ 21 l1d-miss
+ 897 l1d-access
+ 5 llc-miss
+ 7 llc-access
+ 2 tlb-miss
+ 1K tlb-access
+ 36 branch-miss
+ 0 remote-access
+ 900 memory
+
+The arm_spe// and dummy:u events are implementation details and are expected to be empty.
+
+To get a full list of unique samples that are not sorted into groups, set the itrace option to
+generate 'instruction' samples. The period option is also taken into account, so set it to 1
+instruction unless you want to further downsample the already sampled SPE data:
+
+ perf report --itrace=i1i
+
+Memory access details are also stored on the samples and this can be viewed with:
+
+ perf report --mem-mode
+
+Common errors
+~~~~~~~~~~~~~
+
+ - "Cannot find PMU `arm_spe'. Missing kernel support?"
+
+ Module not built or loaded, KPTI not disabled (see above), or running on a VM
+
+ - "Arm SPE CONTEXT packets not found in the traces."
+
+ Root privilege is required to collect context packets. But these only increase the accuracy of
+ assigning PIDs to kernel samples. For userspace sampling this can be ignored.
+
+ - Excessively large perf.data file size
+
+ Increase sampling interval (see above)
+
+
+SEE ALSO
+--------
+
+linkperf:perf-record[1], linkperf:perf-script[1], linkperf:perf-report[1],
+linkperf:perf-inject[1]
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 3b6a2c84ea02..6f69173731aa 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -189,9 +189,10 @@ For each cacheline in the 1) list we display following data:
Total stores
- sum of all store accesses
- Store Reference - L1Hit, L1Miss
+ Store Reference - L1Hit, L1Miss, N/A
L1Hit - store accesses that hit L1
L1Miss - store accesses that missed L1
+        N/A - store accesses where the memory level is not available
Core Load Hit - FB, L1, L2
- count of load hits in FB (Fill Buffer), L1 and L2 cache
@@ -210,8 +211,9 @@ For each offset in the 2) list we display following data:
HITM - Rmt, Lcl
- % of Remote/Local HITM accesses for given offset within cacheline
- Store Refs - L1 Hit, L1 Miss
- - % of store accesses that hit/missed L1 for given offset within cacheline
+ Store Refs - L1 Hit, L1 Miss, N/A
+   - % of store accesses that hit L1, missed L1, or had no available (N/A)
+     memory level, for the given offset within the cacheline
Data address - Offset
- offset address
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index ff58bd4c381b..238ab9d3cb93 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -468,6 +468,8 @@ ptw Enable PTWRITE packets which are produced when a ptwrite instruction
which contains "1" if the feature is supported and
"0" otherwise.
+ As an alternative, refer to "Emulated PTWRITE" further below.
+
fup_on_ptw Enable a FUP packet to follow the PTWRITE packet. The FUP packet
provides the address of the ptwrite instruction. In the absence of
fup_on_ptw, the decoder will use the address of the previous branch
@@ -1398,6 +1400,76 @@ There were none.
:17006 17006 [001] 11500.262869216: ffffffff8220116e error_entry+0xe ([guest.kernel.kallsyms]) pushq %rax
+Tracing Virtual Machines - Guest Code
+-------------------------------------
+
+A common case for KVM test programs is that the test program acts as the
+hypervisor, creating, running and destroying the virtual machine, and
+providing the guest object code from its own object code. In this case,
+the VM is not running an OS, but only the functions loaded into it by the
+hypervisor test program, and conveniently, loaded at the same virtual
+addresses. To support that, option "--guest-code" has been added to perf script
+and perf kvm report.
+
+Here is an example tracing a test program from the kernel's KVM selftests:
+
+ # perf record --kcore -e intel_pt/cyc/ -- tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.280 MB perf.data ]
+ # perf script --guest-code --itrace=bep --ns -F-period,+addr,+flags
+ [SNIP]
+ tsc_msrs_test 18436 [007] 10897.962087733: branches: call ffffffffc13b2ff5 __vmx_vcpu_run+0x15 (vmlinux) => ffffffffc13b2f50 vmx_update_host_rsp+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962087733: branches: return ffffffffc13b2f5d vmx_update_host_rsp+0xd (vmlinux) => ffffffffc13b2ffa __vmx_vcpu_run+0x1a (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962087733: branches: call ffffffffc13b303b __vmx_vcpu_run+0x5b (vmlinux) => ffffffffc13b2f80 vmx_vmenter+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962087836: branches: vmentry ffffffffc13b2f82 vmx_vmenter+0x2 (vmlinux) => 0 [unknown] ([unknown])
+ [guest/18436] 18436 [007] 10897.962087836: branches: vmentry 0 [unknown] ([unknown]) => 402c81 guest_code+0x131 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962087836: branches: call 402c81 guest_code+0x131 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dba0 ucall+0x0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962088248: branches: vmexit 40dba0 ucall+0x0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 0 [unknown] ([unknown])
+ tsc_msrs_test 18436 [007] 10897.962088248: branches: vmexit 0 [unknown] ([unknown]) => ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962088248: branches: jmp ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux) => ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962088256: branches: return ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux) => ffffffffc13b3040 __vmx_vcpu_run+0x60 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962088270: branches: return ffffffffc13b30b6 __vmx_vcpu_run+0xd6 (vmlinux) => ffffffffc13b2f2e vmx_vcpu_enter_exit+0x4e (vmlinux)
+ [SNIP]
+ tsc_msrs_test 18436 [007] 10897.962089321: branches: call ffffffffc13b2ff5 __vmx_vcpu_run+0x15 (vmlinux) => ffffffffc13b2f50 vmx_update_host_rsp+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089321: branches: return ffffffffc13b2f5d vmx_update_host_rsp+0xd (vmlinux) => ffffffffc13b2ffa __vmx_vcpu_run+0x1a (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089321: branches: call ffffffffc13b303b __vmx_vcpu_run+0x5b (vmlinux) => ffffffffc13b2f80 vmx_vmenter+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089424: branches: vmentry ffffffffc13b2f82 vmx_vmenter+0x2 (vmlinux) => 0 [unknown] ([unknown])
+ [guest/18436] 18436 [007] 10897.962089424: branches: vmentry 0 [unknown] ([unknown]) => 40dba0 ucall+0x0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jmp 40dc1b ucall+0x7b (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc39 ucall+0x99 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jcc 40dc3c ucall+0x9c (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc20 ucall+0x80 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jcc 40dc3c ucall+0x9c (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc20 ucall+0x80 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jcc 40dc37 ucall+0x97 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc50 ucall+0xb0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089878: branches: vmexit 40dc55 ucall+0xb5 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 0 [unknown] ([unknown])
+ tsc_msrs_test 18436 [007] 10897.962089878: branches: vmexit 0 [unknown] ([unknown]) => ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089878: branches: jmp ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux) => ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089887: branches: return ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux) => ffffffffc13b3040 __vmx_vcpu_run+0x60 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089901: branches: return ffffffffc13b30b6 __vmx_vcpu_run+0xd6 (vmlinux) => ffffffffc13b2f2e vmx_vcpu_enter_exit+0x4e (vmlinux)
+ [SNIP]
+
+ # perf kvm --guest-code --guest --host report -i perf.data --stdio | head -20
+
+ # To display the perf.data header info, please use --header/--header-only options.
+ #
+ #
+ # Total Lost Samples: 0
+ #
+ # Samples: 12 of event 'instructions'
+ # Event count (approx.): 2274583
+ #
+ # Children Self Command Shared Object Symbol
+ # ........ ........ ............. .................... ...........................................
+ #
+ 54.70% 0.00% tsc_msrs_test [kernel.vmlinux] [k] entry_SYSCALL_64_after_hwframe
+ |
+ ---entry_SYSCALL_64_after_hwframe
+ do_syscall_64
+ |
+ |--29.44%--syscall_exit_to_user_mode
+ | exit_to_user_mode_prepare
+ | task_work_run
+ | __fput
+
+
Event Trace
-----------
@@ -1471,6 +1543,99 @@ In that case the --itrace q option is forced because walking executable code
to reconstruct the control flow is not possible.
+Emulated PTWRITE
+----------------
+
+Later perf tools support a method to emulate the ptwrite instruction, which
+can be useful if hardware does not support the ptwrite instruction.
+
+Instead of using the ptwrite instruction, a function is used which produces
+a trace that encodes the payload data into TNT packets. Here is an example
+of the function:
+
+ #include <stdint.h>
+
+ void perf_emulate_ptwrite(uint64_t x)
+ __attribute__((externally_visible, noipa, no_instrument_function, naked));
+
+ #define PERF_EMULATE_PTWRITE_8_BITS \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n"
+
+ /* Undefined instruction */
+ #define PERF_EMULATE_PTWRITE_UD2 ".byte 0x0f, 0x0b\n"
+
+ #define PERF_EMULATE_PTWRITE_MAGIC PERF_EMULATE_PTWRITE_UD2 ".ascii \"perf,ptwrite \"\n"
+
+ void perf_emulate_ptwrite(uint64_t x __attribute__ ((__unused__)))
+ {
+ /* Assumes SysV ABI : x passed in rdi */
+ __asm__ volatile (
+ "jmp 1f\n"
+ PERF_EMULATE_PTWRITE_MAGIC
+ "1: mov %rdi, %rax\n"
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ "1: ret\n"
+ );
+ }
+
+For example, a test program with the function above:
+
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+
+ #include "perf_emulate_ptwrite.h"
+
+ int main(int argc, char *argv[])
+ {
+ uint64_t x = 0;
+
+ if (argc > 1)
+ x = strtoull(argv[1], NULL, 0);
+ perf_emulate_ptwrite(x);
+ return 0;
+ }
+
+Can be compiled and traced:
+
+ $ gcc -Wall -Wextra -O3 -g -o eg_ptw eg_ptw.c
+ $ perf record -e intel_pt//u ./eg_ptw 0x1234567890abcdef
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.017 MB perf.data ]
+ $ perf script --itrace=ew
+ eg_ptw 19875 [007] 8061.235912: ptwrite: IP: 0 payload: 0x1234567890abcdef 55701249a196 perf_emulate_ptwrite+0x16 (/home/user/eg_ptw)
+ $
+
+
+EXAMPLE
+-------
+
+Examples can be found on perf wiki page "Perf tools support for Intel® Processor Trace":
+
+https://perf.wiki.kernel.org/index.php/Perf_tools_support_for_Intel%C2%AE_Processor_Trace
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index cf95baef7b61..83c742adf86e 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -94,6 +94,9 @@ OPTIONS
kernel module information. Users copy it out from guest os.
--guestvmlinux=<path>::
Guest os kernel vmlinux.
+--guest-code::
+ Indicate that guest code can be found in the hypervisor process,
+ which is a common case for KVM test programs.
-v::
--verbose::
Be more verbose (show counter open errors, etc).
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index b43222229807..656b537b2fba 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -64,6 +64,27 @@ REPORT OPTIONS
--combine-locks::
Merge lock instances in the same class (based on name).
+-t::
+--threads::
+	The -t option shows per-thread lock statistics, as shown below:
+
+ $ perf lock report -t -F acquired,contended,avg_wait
+
+ Name acquired contended avg wait (ns)
+
+ perf 240569 9 5784
+ swapper 106610 19 543
+ :15789 17370 2 14538
+ ContainerMgr 8981 6 874
+ sleep 5275 1 11281
+ ContainerThread 4416 4 944
+ RootPressureThr 3215 5 1215
+ rcu_preempt 2954 0 0
+ ContainerMgr 2560 0 0
+ unnamed 1873 0 0
+ EventManager_De 1845 1 636
+ futex-default-S 1609 0 0
+
INFO OPTIONS
------------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 465be4e62a17..cf8ad50f3de1 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,7 +33,7 @@ OPTIONS
- a raw PMU event in the form of rN where N is a hexadecimal value
that represents the raw register encoding with the layout of the
event control registers as described by entries in
- /sys/bus/event_sources/devices/cpu/format/*.
+ /sys/bus/event_source/devices/cpu/format/*.
- a symbolic or raw PMU event followed by an optional colon
and a list of event modifiers, e.g., cpu-cycles:p. See the
@@ -758,6 +758,16 @@ include::intel-hybrid.txt[]
	If the URLs are not specified, the value of DEBUGINFOD_URLS
system environment variable is used.
+--off-cpu::
+	Enable off-cpu profiling with BPF. The BPF program will collect
+	task scheduling information with (user) stacktraces and save it
+	as sample data of a software event named "offcpu-time". The
+	sample period will contain the time the task slept, in nanoseconds.
+
+	Note that, for now, BPF can collect stack traces only with frame
+	pointers ("fp"), so applications built without frame pointers may
+	show bogus addresses.
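+
+	A minimal usage sketch (the workload name is a placeholder):
+
+	    perf record --off-cpu -- ./myapp
+	    perf report
+
+	The report should then include samples for the "offcpu-time"
+	software event described above.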
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 2012a8e6c90b..1a557ff8f210 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -499,6 +499,10 @@ include::itrace.txt[]
The known limitations include exception handing such as
setjmp/longjmp will have calls/returns not match.
+--guest-code::
+ Indicate that guest code can be found in the hypervisor process,
+ which is a common case for KVM test programs.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index c06c341e72b9..d8a33f4a47c5 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -39,7 +39,7 @@ report::
- a raw PMU event in the form of rN where N is a hexadecimal value
that represents the raw register encoding with the layout of the
event control registers as described by entries in
- /sys/bus/event_sources/devices/cpu/format/*.
+ /sys/bus/event_source/devices/cpu/format/*.
- a symbolic or raw PMU event followed by an optional colon
and a list of event modifiers, e.g., cpu-cycles:p. See the
@@ -454,6 +454,16 @@ Multiple events are created from a single event specification when:
2. Aliases, which are listed immediately after the Kernel PMU events
by perf list, are used.
+--hybrid-merge::
+Merge the hybrid event counts from all PMUs.
+
+For hybrid events, by default, perf stat aggregates and reports the event
+counts per PMU. Sometimes it is also useful to aggregate event counts from
+all PMUs. This option enables that behavior and reports the counts without
+a per-PMU breakdown.
+
+For non-hybrid events, this option has no effect.
+
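+A hedged usage sketch (the event and workload are illustrative): on a hybrid
+system,
+
+    perf stat --hybrid-merge -e cycles -- ./mybench
+
+reports one aggregated cycles count rather than one count per PMU.
+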
--smi-cost::
Measure SMI cost if msr/aperf/ and msr/smi/ events are supported.
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index cac3dfbee7d8..c1fdba26bf53 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -41,7 +41,7 @@ Default is to monitor all CPUS.
(use 'perf list' to list all events) or a raw PMU event in the form
of rN where N is a hexadecimal value that represents the raw register
encoding with the layout of the event control registers as described
- by entries in /sys/bus/event_sources/devices/cpu/format/*.
+ by entries in /sys/bus/event_source/devices/cpu/format/*.
-E <entries>::
--entries=<entries>::
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 71ebdf8125de..ba3df49c169d 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -77,7 +77,7 @@ linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1], linkperf:perf-report[1],
linkperf:perf-list[1]
-linkperf:perf-annotate[1],linkperf:perf-archive[1],
+linkperf:perf-annotate[1],linkperf:perf-archive[1],linkperf:perf-arm-spe[1],
linkperf:perf-bench[1], linkperf:perf-buildid-cache[1],
linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 1bd64e7404b9..73e0762092fe 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -239,18 +239,33 @@ ifdef PARSER_DEBUG
endif
# Try different combinations to accommodate systems that only have
-# python[2][-config] in weird combinations but always preferring
-# python2 and python2-config as per pep-0394. If python2 or python
-# aren't found, then python3 is used.
-PYTHON_AUTO := python
-PYTHON_AUTO := $(if $(call get-executable,python3),python3,$(PYTHON_AUTO))
-PYTHON_AUTO := $(if $(call get-executable,python),python,$(PYTHON_AUTO))
-PYTHON_AUTO := $(if $(call get-executable,python2),python2,$(PYTHON_AUTO))
-override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON_AUTO))
-PYTHON_AUTO_CONFIG := \
- $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
-override PYTHON_CONFIG := \
- $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO_CONFIG))
+# python[2][3]-config in weird combinations in the following order of
+# priority from lowest to highest:
+# * python3-config
+# * python-config
+# * python2-config as per pep-0394.
+# * $(PYTHON)-config (If PYTHON is user supplied but PYTHON_CONFIG isn't)
+#
+PYTHON_AUTO := python-config
+PYTHON_AUTO := $(if $(call get-executable,python3-config),python3-config,$(PYTHON_AUTO))
+PYTHON_AUTO := $(if $(call get-executable,python-config),python-config,$(PYTHON_AUTO))
+PYTHON_AUTO := $(if $(call get-executable,python2-config),python2-config,$(PYTHON_AUTO))
+
+# If PYTHON is defined but PYTHON_CONFIG isn't, then take $(PYTHON)-config as if it was the user
+# supplied value for PYTHON_CONFIG. Because it's "user supplied", error out if it doesn't exist.
+ifdef PYTHON
+ ifndef PYTHON_CONFIG
+ PYTHON_CONFIG_AUTO := $(call get-executable,$(PYTHON)-config)
+ PYTHON_CONFIG := $(if $(PYTHON_CONFIG_AUTO),$(PYTHON_CONFIG_AUTO),\
+ $(call $(error $(PYTHON)-config not found)))
+ endif
+endif
+
+# Select either auto detected python and python-config or use user supplied values if they are
+# defined. get-executable-or-default fails with an error if the first argument is supplied but
+# doesn't exist.
+override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO))
+override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_AUTO)))
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
@@ -558,11 +573,36 @@ ifndef NO_LIBELF
ifeq ($(feature-libbpf-btf__load_from_kernel_by_id), 1)
CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
endif
+ $(call feature_check,libbpf-bpf_prog_load)
+ ifeq ($(feature-libbpf-bpf_prog_load), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
+ endif
+ $(call feature_check,libbpf-bpf_object__next_program)
+ ifeq ($(feature-libbpf-bpf_object__next_program), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
+ endif
+ $(call feature_check,libbpf-bpf_object__next_map)
+ ifeq ($(feature-libbpf-bpf_object__next_map), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+ endif
+ $(call feature_check,libbpf-btf__raw_data)
+ ifeq ($(feature-libbpf-btf__raw_data), 1)
+ CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
+ endif
+ $(call feature_check,libbpf-bpf_map_create)
+ ifeq ($(feature-libbpf-bpf_map_create), 1)
+ CFLAGS += -DHAVE_LIBBPF_BPF_MAP_CREATE
+ endif
else
dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
endif
else
CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
+ CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
+ CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+ CFLAGS += -DHAVE_LIBBPF_BTF__RAW_DATA
+ CFLAGS += -DHAVE_LIBBPF_BPF_MAP_CREATE
endif
endif
@@ -656,6 +696,9 @@ ifdef BUILD_BPF_SKEL
ifeq ($(feature-clang-bpf-co-re), 0)
dummy := $(error Error: clang too old/not installed. Please install recent clang to build with BUILD_BPF_SKEL)
endif
+ ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),)
+ dummy := $(error Error: BPF skeleton support requires libbpf)
+ endif
$(call detected,CONFIG_PERF_BPF_SKEL)
CFLAGS += -DHAVE_BPF_SKEL
endif
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 69473a836bae..8f738e11356d 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -286,7 +286,6 @@ PYRF_OBJS =
SCRIPT_SH =
SCRIPT_SH += perf-archive.sh
-SCRIPT_SH += perf-with-kcore.sh
SCRIPT_SH += perf-iostat.sh
grep-libs = $(filter -l%,$(1))
@@ -973,8 +972,6 @@ ifndef NO_LIBBPF
endif
$(call QUIET_INSTALL, perf-archive) \
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
- $(call QUIET_INSTALL, perf-with-kcore) \
- $(INSTALL) $(OUTPUT)perf-with-kcore -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(call QUIET_INSTALL, perf-iostat) \
$(INSTALL) $(OUTPUT)perf-iostat -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
ifndef NO_LIBAUDIT
@@ -1041,6 +1038,7 @@ SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp)
SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h
SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
+SKELETONS += $(SKEL_OUT)/off_cpu.skel.h
$(SKEL_TMP_OUT) $(LIBBPF_OUTPUT):
$(Q)$(MKDIR) -p $@
@@ -1088,7 +1086,7 @@ bpf-skel-clean:
$(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS)
clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(OUTPUT)perf-iostat $(LANG_BINDINGS)
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
$(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 11c71aa219f7..1b54638d53b0 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -319,6 +319,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
+ evsel->needs_auxtrace_mmap = true;
cs_etm_evsel = evsel;
opts->full_auxtrace = true;
}
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index e8b577d33e53..6f4db2ac5420 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -160,6 +160,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = arm_spe_pmu->default_config->sample_period;
+ evsel->needs_auxtrace_mmap = true;
arm_spe_evsel = evsel;
opts->full_auxtrace = true;
}
diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
index be41721b9aa1..df817d1f9f3e 100644
--- a/tools/perf/arch/arm64/util/mem-events.c
+++ b/tools/perf/arch/arm64/util/mem-events.c
@@ -5,9 +5,9 @@
#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
- E("spe-load", "arm_spe_0/ts_enable=1,load_filter=1,store_filter=0,min_latency=%u/", "arm_spe_0"),
- E("spe-store", "arm_spe_0/ts_enable=1,load_filter=0,store_filter=1/", "arm_spe_0"),
- E("spe-ldst", "arm_spe_0/ts_enable=1,load_filter=1,store_filter=1,min_latency=%u/", "arm_spe_0"),
+ E("spe-load", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=%u/", "arm_spe_0"),
+ E("spe-store", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/", "arm_spe_0"),
+ E("spe-ldst", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=1,min_latency=%u/", "arm_spe_0"),
};
static char mem_ev_name[100];
diff --git a/tools/perf/arch/arm64/util/perf_regs.c b/tools/perf/arch/arm64/util/perf_regs.c
index 476b037eea1c..006692c9b040 100644
--- a/tools/perf/arch/arm64/util/perf_regs.c
+++ b/tools/perf/arch/arm64/util/perf_regs.c
@@ -2,13 +2,19 @@
#include <errno.h>
#include <regex.h>
#include <string.h>
+#include <sys/auxv.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
+#include "../../../perf-sys.h"
#include "../../../util/debug.h"
#include "../../../util/event.h"
#include "../../../util/perf_regs.h"
+#ifndef HWCAP_SVE
+#define HWCAP_SVE (1 << 22)
+#endif
+
const struct sample_reg sample_reg_masks[] = {
SMPL_REG(x0, PERF_REG_ARM64_X0),
SMPL_REG(x1, PERF_REG_ARM64_X1),
@@ -43,6 +49,7 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(lr, PERF_REG_ARM64_LR),
SMPL_REG(sp, PERF_REG_ARM64_SP),
SMPL_REG(pc, PERF_REG_ARM64_PC),
+ SMPL_REG(vg, PERF_REG_ARM64_VG),
SMPL_REG_END
};
@@ -131,3 +138,34 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
+
+uint64_t arch__user_reg_mask(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_REGS_USER,
+ .disabled = 1,
+ .exclude_kernel = 1,
+ .sample_period = 1,
+ .sample_regs_user = PERF_REGS_MASK
+ };
+ int fd;
+
+ if (getauxval(AT_HWCAP) & HWCAP_SVE)
+ attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG);
+
+ /*
+ * Check if the pmu supports perf extended regs, before
+ * returning the register mask to sample.
+ */
+ if (attr.sample_regs_user != PERF_REGS_MASK) {
+ event_attr_init(&attr);
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ if (fd != -1) {
+ close(fd);
+ return attr.sample_regs_user;
+ }
+ }
+ return PERF_REGS_MASK;
+}
diff --git a/tools/perf/arch/arm64/util/unwind-libunwind.c b/tools/perf/arch/arm64/util/unwind-libunwind.c
index 5aecf88e3de6..871af5992298 100644
--- a/tools/perf/arch/arm64/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm64/util/unwind-libunwind.c
@@ -10,77 +10,8 @@
int LIBUNWIND__ARCH_REG_ID(int regnum)
{
- switch (regnum) {
- case UNW_AARCH64_X0:
- return PERF_REG_ARM64_X0;
- case UNW_AARCH64_X1:
- return PERF_REG_ARM64_X1;
- case UNW_AARCH64_X2:
- return PERF_REG_ARM64_X2;
- case UNW_AARCH64_X3:
- return PERF_REG_ARM64_X3;
- case UNW_AARCH64_X4:
- return PERF_REG_ARM64_X4;
- case UNW_AARCH64_X5:
- return PERF_REG_ARM64_X5;
- case UNW_AARCH64_X6:
- return PERF_REG_ARM64_X6;
- case UNW_AARCH64_X7:
- return PERF_REG_ARM64_X7;
- case UNW_AARCH64_X8:
- return PERF_REG_ARM64_X8;
- case UNW_AARCH64_X9:
- return PERF_REG_ARM64_X9;
- case UNW_AARCH64_X10:
- return PERF_REG_ARM64_X10;
- case UNW_AARCH64_X11:
- return PERF_REG_ARM64_X11;
- case UNW_AARCH64_X12:
- return PERF_REG_ARM64_X12;
- case UNW_AARCH64_X13:
- return PERF_REG_ARM64_X13;
- case UNW_AARCH64_X14:
- return PERF_REG_ARM64_X14;
- case UNW_AARCH64_X15:
- return PERF_REG_ARM64_X15;
- case UNW_AARCH64_X16:
- return PERF_REG_ARM64_X16;
- case UNW_AARCH64_X17:
- return PERF_REG_ARM64_X17;
- case UNW_AARCH64_X18:
- return PERF_REG_ARM64_X18;
- case UNW_AARCH64_X19:
- return PERF_REG_ARM64_X19;
- case UNW_AARCH64_X20:
- return PERF_REG_ARM64_X20;
- case UNW_AARCH64_X21:
- return PERF_REG_ARM64_X21;
- case UNW_AARCH64_X22:
- return PERF_REG_ARM64_X22;
- case UNW_AARCH64_X23:
- return PERF_REG_ARM64_X23;
- case UNW_AARCH64_X24:
- return PERF_REG_ARM64_X24;
- case UNW_AARCH64_X25:
- return PERF_REG_ARM64_X25;
- case UNW_AARCH64_X26:
- return PERF_REG_ARM64_X26;
- case UNW_AARCH64_X27:
- return PERF_REG_ARM64_X27;
- case UNW_AARCH64_X28:
- return PERF_REG_ARM64_X28;
- case UNW_AARCH64_X29:
- return PERF_REG_ARM64_X29;
- case UNW_AARCH64_X30:
- return PERF_REG_ARM64_LR;
- case UNW_AARCH64_SP:
- return PERF_REG_ARM64_SP;
- case UNW_AARCH64_PC:
- return PERF_REG_ARM64_PC;
- default:
- pr_err("unwind: invalid reg id %d\n", regnum);
+ if (regnum < 0 || regnum >= PERF_REG_ARM64_EXTENDED_MAX)
return -EINVAL;
- }
- return -EINVAL;
+ return regnum;
}
diff --git a/tools/perf/arch/riscv/Makefile b/tools/perf/arch/riscv/Makefile
index 1aa9dd772489..a8d25d005207 100644
--- a/tools/perf/arch/riscv/Makefile
+++ b/tools/perf/arch/riscv/Makefile
@@ -2,3 +2,4 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c
index 0db5c58c98e8..5068baa3e092 100644
--- a/tools/perf/arch/s390/util/auxtrace.c
+++ b/tools/perf/arch/s390/util/auxtrace.c
@@ -98,6 +98,7 @@ struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
evlist__for_each_entry(evlist, pos) {
if (pos->core.attr.config == PERF_EVENT_CPUM_SF_DIAG) {
diagnose = 1;
+ pos->needs_auxtrace_mmap = true;
break;
}
}
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index cfc208d71f00..68f681ad54c1 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -3,6 +3,7 @@
#include "util/pmu.h"
#include "util/evlist.h"
#include "util/parse-events.h"
+#include "topdown.h"
#define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
#define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
@@ -25,18 +26,18 @@ struct evsel *arch_evlist__leader(struct list_head *list)
first = list_first_entry(list, struct evsel, core.node);
- if (!pmu_have_event("cpu", "slots"))
+ if (!topdown_sys_has_perf_metrics())
return first;
/* If there is a slots event and a topdown event then the slots event comes first. */
__evlist__for_each_entry(list, evsel) {
- if (evsel->pmu_name && !strcmp(evsel->pmu_name, "cpu") && evsel->name) {
+ if (evsel->pmu_name && !strncmp(evsel->pmu_name, "cpu", 3) && evsel->name) {
if (strcasestr(evsel->name, "slots")) {
slots = evsel;
if (slots == first)
return first;
}
- if (!strncasecmp(evsel->name, "topdown", 7))
+ if (strcasestr(evsel->name, "topdown"))
has_topdown = true;
if (slots && has_topdown)
return slots;
diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
index ac2899a25b7a..3501399cef35 100644
--- a/tools/perf/arch/x86/util/evsel.c
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -3,7 +3,9 @@
#include <stdlib.h>
#include "util/evsel.h"
#include "util/env.h"
+#include "util/pmu.h"
#include "linux/string.h"
+#include "evsel.h"
void arch_evsel__set_sample_weight(struct evsel *evsel)
{
@@ -29,3 +31,33 @@ void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr)
free(env.cpuid);
}
+
+/* Check whether the evsel's PMU supports the perf metrics */
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
+{
+ const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu";
+
+ /*
+ * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU
+ * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine.
+ * The slots event is only available for the core PMU, which
+ * supports the perf metrics feature.
+ * Checking both the PERF_TYPE_RAW type and the slots event
+ * should be good enough to detect the perf metrics feature.
+ */
+ if ((evsel->core.attr.type == PERF_TYPE_RAW) &&
+ pmu_have_event(pmu_name, "slots"))
+ return true;
+
+ return false;
+}
+
+bool arch_evsel__must_be_in_group(const struct evsel *evsel)
+{
+ if (!evsel__sys_has_perf_metrics(evsel))
+ return false;
+
+ return evsel->name &&
+ (strcasestr(evsel->name, "slots") ||
+ strcasestr(evsel->name, "topdown"));
+}
diff --git a/tools/perf/arch/x86/util/evsel.h b/tools/perf/arch/x86/util/evsel.h
new file mode 100644
index 000000000000..19ad1691374d
--- /dev/null
+++ b/tools/perf/arch/x86/util/evsel.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _EVSEL_H
+#define _EVSEL_H 1
+
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel);
+
+#endif
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index d68a0f48e41e..bcccfbade5c6 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -129,6 +129,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
+ evsel->needs_auxtrace_mmap = true;
intel_bts_evsel = evsel;
opts->full_auxtrace = true;
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 38ec2666ec12..06c2cdfd8f2f 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -649,6 +649,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
evsel->no_aux_samples = true;
+ evsel->needs_auxtrace_mmap = true;
intel_pt_evsel = evsel;
opts->full_auxtrace = true;
}
@@ -810,18 +811,11 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
if (!cpu_wide && perf_can_record_cpu_wide()) {
struct evsel *switch_evsel;
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
+ switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
+ if (!switch_evsel)
+ return -ENOMEM;
- switch_evsel = evlist__last(evlist);
-
- switch_evsel->core.attr.freq = 0;
- switch_evsel->core.attr.sample_period = 1;
switch_evsel->core.attr.context_switch = 1;
-
- switch_evsel->core.system_wide = true;
- switch_evsel->no_aux_samples = true;
switch_evsel->immediate = true;
evsel__set_sample_bit(switch_evsel, TID);
@@ -870,20 +864,22 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
/* Add dummy event to keep tracking */
if (opts->full_auxtrace) {
+ bool need_system_wide_tracking;
struct evsel *tracking_evsel;
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
+ /*
+ * User space tasks can migrate between CPUs, so when tracing
+ * selected CPUs, sideband for all CPUs is still needed.
+ */
+ need_system_wide_tracking = evlist->core.has_user_cpus &&
+ !intel_pt_evsel->core.attr.exclude_user;
- tracking_evsel = evlist__last(evlist);
+ tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
+ if (!tracking_evsel)
+ return -ENOMEM;
evlist__set_tracking_event(evlist, tracking_evsel);
- tracking_evsel->core.attr.freq = 0;
- tracking_evsel->core.attr.sample_period = 1;
-
- tracking_evsel->no_aux_samples = true;
if (need_immediate)
tracking_evsel->immediate = true;
diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c
index 2f3d96aa92a5..f81a7cfe4d63 100644
--- a/tools/perf/arch/x86/util/topdown.c
+++ b/tools/perf/arch/x86/util/topdown.c
@@ -3,6 +3,32 @@
#include "api/fs/fs.h"
#include "util/pmu.h"
#include "util/topdown.h"
+#include "topdown.h"
+#include "evsel.h"
+
+/* Check whether there is a PMU which supports the perf metrics. */
+bool topdown_sys_has_perf_metrics(void)
+{
+ static bool has_perf_metrics;
+ static bool cached;
+ struct perf_pmu *pmu;
+
+ if (cached)
+ return has_perf_metrics;
+
+ /*
+ * The perf metrics feature is a core PMU feature.
+ * The PERF_TYPE_RAW type is the type of a core PMU.
+ * The slots event is only available when the core PMU
+ * supports the perf metrics feature.
+ */
+ pmu = perf_pmu__find_by_type(PERF_TYPE_RAW);
+ if (pmu && pmu_have_event(pmu->name, "slots"))
+ has_perf_metrics = true;
+
+ cached = true;
+ return has_perf_metrics;
+}
/*
* Check whether we can use a group for top down.
@@ -30,33 +56,19 @@ void arch_topdown_group_warn(void)
#define TOPDOWN_SLOTS 0x0400
-static bool is_topdown_slots_event(struct evsel *counter)
-{
- if (!counter->pmu_name)
- return false;
-
- if (strcmp(counter->pmu_name, "cpu"))
- return false;
-
- if (counter->core.attr.config == TOPDOWN_SLOTS)
- return true;
-
- return false;
-}
-
/*
* Check whether a topdown group supports sample-read.
*
- * Only Topdown metic supports sample-read. The slots
+ * Only Topdown metric supports sample-read. The slots
* event must be the leader of the topdown group.
*/
bool arch_topdown_sample_read(struct evsel *leader)
{
- if (!pmu_have_event("cpu", "slots"))
+ if (!evsel__sys_has_perf_metrics(leader))
return false;
- if (is_topdown_slots_event(leader))
+ if (leader->core.attr.config == TOPDOWN_SLOTS)
return true;
return false;
diff --git a/tools/perf/arch/x86/util/topdown.h b/tools/perf/arch/x86/util/topdown.h
new file mode 100644
index 000000000000..46bf9273e572
--- /dev/null
+++ b/tools/perf/arch/x86/util/topdown.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOPDOWN_H
+#define _TOPDOWN_H 1
+
+bool topdown_sys_has_perf_metrics(void);
+
+#endif
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index 61d45fcb4057..6b6155a8ad09 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -14,6 +14,7 @@ perf-y += kallsyms-parse.o
perf-y += find-bit-bench.o
perf-y += inject-buildid.o
perf-y += evlist-open-close.o
+perf-y += breakpoint.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index b3480bc33fe8..6cefb4315d75 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -49,6 +49,8 @@ int bench_synthesize(int argc, const char **argv);
int bench_kallsyms_parse(int argc, const char **argv);
int bench_inject_build_id(int argc, const char **argv);
int bench_evlist_open_close(int argc, const char **argv);
+int bench_breakpoint_thread(int argc, const char **argv);
+int bench_breakpoint_enable(int argc, const char **argv);
#define BENCH_FORMAT_DEFAULT_STR "default"
#define BENCH_FORMAT_DEFAULT 0
diff --git a/tools/perf/bench/breakpoint.c b/tools/perf/bench/breakpoint.c
new file mode 100644
index 000000000000..41385f89ffc7
--- /dev/null
+++ b/tools/perf/bench/breakpoint.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <subcmd/parse-options.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <linux/time64.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include "bench.h"
+#include "futex.h"
+
+struct {
+ unsigned int nbreakpoints;
+ unsigned int nparallel;
+ unsigned int nthreads;
+} thread_params = {
+ .nbreakpoints = 1,
+ .nparallel = 1,
+ .nthreads = 1,
+};
+
+static const struct option thread_options[] = {
+ OPT_UINTEGER('b', "breakpoints", &thread_params.nbreakpoints,
+ "Specify amount of breakpoints"),
+ OPT_UINTEGER('p', "parallelism", &thread_params.nparallel, "Specify amount of parallelism"),
+ OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
+ OPT_END()
+};
+
+static const char * const thread_usage[] = {
+ "perf bench breakpoint thread <options>",
+ NULL
+};
+
+struct breakpoint {
+ int fd;
+ char watched;
+};
+
+static int breakpoint_setup(void *addr)
+{
+ struct perf_event_attr attr = { .size = 0, };
+
+ attr.type = PERF_TYPE_BREAKPOINT;
+ attr.size = sizeof(attr);
+ attr.inherit = 1;
+ attr.exclude_kernel = 1;
+ attr.exclude_hv = 1;
+ attr.bp_addr = (unsigned long)addr;
+ attr.bp_type = HW_BREAKPOINT_RW;
+ attr.bp_len = HW_BREAKPOINT_LEN_1;
+ return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
+}
+
+static void *passive_thread(void *arg)
+{
+ unsigned int *done = (unsigned int *)arg;
+
+ while (!__atomic_load_n(done, __ATOMIC_RELAXED))
+ futex_wait(done, 0, NULL, 0);
+ return NULL;
+}
+
+static void *active_thread(void *arg)
+{
+ unsigned int *done = (unsigned int *)arg;
+
+ while (!__atomic_load_n(done, __ATOMIC_RELAXED));
+ return NULL;
+}
+
+static void *breakpoint_thread(void *arg)
+{
+ unsigned int i, done;
+ int *repeat = (int *)arg;
+ pthread_t *threads;
+
+ threads = calloc(thread_params.nthreads, sizeof(threads[0]));
+ if (!threads)
+ exit((perror("calloc"), EXIT_FAILURE));
+
+ while (__atomic_fetch_sub(repeat, 1, __ATOMIC_RELAXED) > 0) {
+ done = 0;
+ for (i = 0; i < thread_params.nthreads; i++) {
+ if (pthread_create(&threads[i], NULL, passive_thread, &done))
+ exit((perror("pthread_create"), EXIT_FAILURE));
+ }
+ __atomic_store_n(&done, 1, __ATOMIC_RELAXED);
+ futex_wake(&done, thread_params.nthreads, 0);
+ for (i = 0; i < thread_params.nthreads; i++)
+ pthread_join(threads[i], NULL);
+ }
+ free(threads);
+ return NULL;
+}
+
+// The benchmark creates nbreakpoints inheritable breakpoints,
+// then starts nparallel threads which create and join bench_repeat batches of nthreads threads.
+int bench_breakpoint_thread(int argc, const char **argv)
+{
+ unsigned int i, result_usec;
+ int repeat = bench_repeat;
+ struct breakpoint *breakpoints;
+ pthread_t *parallel;
+ struct timeval start, stop, diff;
+
+ if (parse_options(argc, argv, thread_options, thread_usage, 0)) {
+ usage_with_options(thread_usage, thread_options);
+ exit(EXIT_FAILURE);
+ }
+ breakpoints = calloc(thread_params.nbreakpoints, sizeof(breakpoints[0]));
+ parallel = calloc(thread_params.nparallel, sizeof(parallel[0]));
+ if (!breakpoints || !parallel)
+ exit((perror("calloc"), EXIT_FAILURE));
+
+ for (i = 0; i < thread_params.nbreakpoints; i++) {
+ breakpoints[i].fd = breakpoint_setup(&breakpoints[i].watched);
+ if (breakpoints[i].fd == -1)
+ exit((perror("perf_event_open"), EXIT_FAILURE));
+ }
+ gettimeofday(&start, NULL);
+ for (i = 0; i < thread_params.nparallel; i++) {
+ if (pthread_create(&parallel[i], NULL, breakpoint_thread, &repeat))
+ exit((perror("pthread_create"), EXIT_FAILURE));
+ }
+ for (i = 0; i < thread_params.nparallel; i++)
+ pthread_join(parallel[i], NULL);
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+ for (i = 0; i < thread_params.nbreakpoints; i++)
+ close(breakpoints[i].fd);
+ free(parallel);
+ free(breakpoints);
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# Created/joined %d threads with %d breakpoints and %d parallelism\n",
+ bench_repeat, thread_params.nbreakpoints, thread_params.nparallel);
+ printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+ (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ result_usec = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ printf(" %14lf usecs/op\n",
+ (double)result_usec / bench_repeat / thread_params.nthreads);
+ printf(" %14lf usecs/op/cpu\n",
+ (double)result_usec / bench_repeat /
+ thread_params.nthreads * thread_params.nparallel);
+ break;
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n", (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ break;
+ default:
+ fprintf(stderr, "Unknown format: %d\n", bench_format);
+ exit(EXIT_FAILURE);
+ }
+ return 0;
+}
+
+struct {
+ unsigned int npassive;
+ unsigned int nactive;
+} enable_params = {
+ .nactive = 0,
+ .npassive = 0,
+};
+
+static const struct option enable_options[] = {
+ OPT_UINTEGER('p', "passive", &enable_params.npassive, "Specify amount of passive threads"),
+ OPT_UINTEGER('a', "active", &enable_params.nactive, "Specify amount of active threads"),
+ OPT_END()
+};
+
+static const char * const enable_usage[] = {
+ "perf bench breakpoint enable <options>",
+ NULL
+};
+
+// The benchmark creates an inheritable breakpoint,
+// then starts npassive threads that block and nactive threads that actively spin
+// and then disables and enables the breakpoint bench_repeat times.
+int bench_breakpoint_enable(int argc, const char **argv)
+{
+ unsigned int i, nthreads, result_usec, done = 0;
+ char watched;
+ int fd;
+ pthread_t *threads;
+ struct timeval start, stop, diff;
+
+ if (parse_options(argc, argv, enable_options, enable_usage, 0)) {
+ usage_with_options(enable_usage, enable_options);
+ exit(EXIT_FAILURE);
+ }
+ fd = breakpoint_setup(&watched);
+ if (fd == -1)
+ exit((perror("perf_event_open"), EXIT_FAILURE));
+ nthreads = enable_params.npassive + enable_params.nactive;
+ threads = calloc(nthreads, sizeof(threads[0]));
+ if (!threads)
+ exit((perror("calloc"), EXIT_FAILURE));
+
+ for (i = 0; i < nthreads; i++) {
+ if (pthread_create(&threads[i], NULL,
+ i < enable_params.npassive ? passive_thread : active_thread, &done))
+ exit((perror("pthread_create"), EXIT_FAILURE));
+ }
+ usleep(10000); // let the threads block
+ gettimeofday(&start, NULL);
+ for (i = 0; i < bench_repeat; i++) {
+ if (ioctl(fd, PERF_EVENT_IOC_DISABLE, 0))
+ exit((perror("ioctl(PERF_EVENT_IOC_DISABLE)"), EXIT_FAILURE));
+ if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0))
+ exit((perror("ioctl(PERF_EVENT_IOC_ENABLE)"), EXIT_FAILURE));
+ }
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+ __atomic_store_n(&done, 1, __ATOMIC_RELAXED);
+ futex_wake(&done, enable_params.npassive, 0);
+ for (i = 0; i < nthreads; i++)
+ pthread_join(threads[i], NULL);
+ free(threads);
+ close(fd);
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# Enabled/disabled breakpoint %d time with %d passive and %d active threads\n",
+ bench_repeat, enable_params.npassive, enable_params.nactive);
+ printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+ (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ result_usec = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ printf(" %14lf usecs/op\n", (double)result_usec / bench_repeat);
+ break;
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n", (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ break;
+ default:
+ fprintf(stderr, "Unknown format: %d\n", bench_format);
+ exit(EXIT_FAILURE);
+ }
+ return 0;
+}
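+
+// Usage sketch (options defined in thread_options/enable_options above;
+// the values are illustrative):
+//
+//     perf bench breakpoint thread -b 1 -p 2 -t 4
+//     perf bench breakpoint enable -p 1 -a 1
+//
+// "thread" measures thread create/join cost while breakpoints are
+// inherited; "enable" measures PERF_EVENT_IOC_DISABLE/ENABLE cost.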
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e65dc380be15..2ffe071dbcff 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -54,6 +54,7 @@ struct perf_annotate {
bool skip_missing;
bool has_br_stack;
bool group_set;
+ float min_percent;
const char *sym_hist_filter;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -324,6 +325,17 @@ static void hists__find_annotations(struct hists *hists,
(strcmp(he->ms.sym->name, ann->sym_hist_filter) != 0))
goto find_next;
+ if (ann->min_percent) {
+ float percent = 0;
+ u64 total = hists__total_period(hists);
+
+ if (total)
+ percent = 100.0 * he->stat.period / total;
+
+ if (percent < ann->min_percent)
+ goto find_next;
+ }
+
notes = symbol__annotation(he->ms.sym);
if (notes->src == NULL) {
find_next:
@@ -457,6 +469,16 @@ out:
return ret;
}
+static int parse_percent_limit(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_annotate *ann = opt->value;
+ double pcnt = strtof(str, NULL);
+
+ ann->min_percent = pcnt;
+ return 0;
+}
+
static const char * const annotate_usage[] = {
"perf annotate [<options>]",
NULL
@@ -557,6 +579,8 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
+ OPT_CALLBACK(0, "percent-limit", &annotate, "percent",
+ "Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
"Instruction Tracing options\n" ITRACE_HELP,
itrace_parse_synth_opts),
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index d291f3a8af5f..334ab897aae3 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -92,6 +92,13 @@ static struct bench internals_benchmarks[] = {
{ NULL, NULL, NULL }
};
+static struct bench breakpoint_benchmarks[] = {
+ { "thread", "Benchmark thread start/finish with breakpoints", bench_breakpoint_thread},
+ { "enable", "Benchmark breakpoint enable/disable", bench_breakpoint_enable},
+ { "all", "Run all breakpoint benchmarks", NULL},
+ { NULL, NULL, NULL },
+};
+
struct collection {
const char *name;
const char *summary;
@@ -110,6 +117,7 @@ static struct collection collections[] = {
{"epoll", "Epoll stressing benchmarks", epoll_benchmarks },
#endif
{ "internals", "Perf-internals benchmarks", internals_benchmarks },
+ { "breakpoint", "Breakpoint benchmarks", breakpoint_benchmarks },
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index fbbed434014f..4898ee57d156 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -653,6 +653,7 @@ STAT_FN(lcl_hitm)
STAT_FN(store)
STAT_FN(st_l1hit)
STAT_FN(st_l1miss)
+STAT_FN(st_na)
STAT_FN(ld_fbhit)
STAT_FN(ld_l1hit)
STAT_FN(ld_l2hit)
@@ -677,7 +678,8 @@ static uint64_t total_records(struct c2c_stats *stats)
total = ldcnt +
stats->st_l1hit +
- stats->st_l1miss;
+ stats->st_l1miss +
+ stats->st_na;
return total;
}
@@ -899,6 +901,7 @@ PERCENT_FN(rmt_hitm)
PERCENT_FN(lcl_hitm)
PERCENT_FN(st_l1hit)
PERCENT_FN(st_l1miss)
+PERCENT_FN(st_na)
static int
percent_rmt_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -925,8 +928,8 @@ percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
double per_left;
double per_right;
- per_left = PERCENT(left, lcl_hitm);
- per_right = PERCENT(right, lcl_hitm);
+ per_left = PERCENT(left, rmt_hitm);
+ per_right = PERCENT(right, rmt_hitm);
return per_left - per_right;
}
@@ -1024,6 +1027,37 @@ percent_stores_l1miss_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
return per_left - per_right;
}
+static int
+percent_stores_na_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, st_na);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_stores_na_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_st_na);
+}
+
+static int64_t
+percent_stores_na_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, st_na);
+ per_right = PERCENT(right, st_na);
+
+ return per_left - per_right;
+}
+
STAT_FN(lcl_dram)
STAT_FN(rmt_dram)
@@ -1351,7 +1385,7 @@ static struct c2c_dimension dim_tot_stores = {
};
static struct c2c_dimension dim_stores_l1hit = {
- .header = HEADER_SPAN("---- Stores ----", "L1Hit", 1),
+ .header = HEADER_SPAN("--------- Stores --------", "L1Hit", 2),
.name = "stores_l1hit",
.cmp = st_l1hit_cmp,
.entry = st_l1hit_entry,
@@ -1366,8 +1400,16 @@ static struct c2c_dimension dim_stores_l1miss = {
.width = 7,
};
+static struct c2c_dimension dim_stores_na = {
+ .header = HEADER_SPAN_LOW("N/A"),
+ .name = "stores_na",
+ .cmp = st_na_cmp,
+ .entry = st_na_entry,
+ .width = 7,
+};
+
static struct c2c_dimension dim_cl_stores_l1hit = {
- .header = HEADER_SPAN("-- Store Refs --", "L1 Hit", 1),
+ .header = HEADER_SPAN("------- Store Refs ------", "L1 Hit", 2),
.name = "cl_stores_l1hit",
.cmp = st_l1hit_cmp,
.entry = st_l1hit_entry,
@@ -1382,6 +1424,14 @@ static struct c2c_dimension dim_cl_stores_l1miss = {
.width = 7,
};
+static struct c2c_dimension dim_cl_stores_na = {
+ .header = HEADER_SPAN_LOW("N/A"),
+ .name = "cl_stores_na",
+ .cmp = st_na_cmp,
+ .entry = st_na_entry,
+ .width = 7,
+};
+
static struct c2c_dimension dim_ld_fbhit = {
.header = HEADER_SPAN("----- Core Load Hit -----", "FB", 2),
.name = "ld_fbhit",
@@ -1471,7 +1521,7 @@ static struct c2c_dimension dim_percent_lcl_hitm = {
};
static struct c2c_dimension dim_percent_stores_l1hit = {
- .header = HEADER_SPAN("-- Store Refs --", "L1 Hit", 1),
+ .header = HEADER_SPAN("------- Store Refs ------", "L1 Hit", 2),
.name = "percent_stores_l1hit",
.cmp = percent_stores_l1hit_cmp,
.entry = percent_stores_l1hit_entry,
@@ -1488,6 +1538,15 @@ static struct c2c_dimension dim_percent_stores_l1miss = {
.width = 7,
};
+static struct c2c_dimension dim_percent_stores_na = {
+ .header = HEADER_SPAN_LOW("N/A"),
+ .name = "percent_stores_na",
+ .cmp = percent_stores_na_cmp,
+ .entry = percent_stores_na_entry,
+ .color = percent_stores_na_color,
+ .width = 7,
+};
+
static struct c2c_dimension dim_dram_lcl = {
.header = HEADER_SPAN("--- Load Dram ----", "Lcl", 1),
.name = "dram_lcl",
@@ -1618,8 +1677,10 @@ static struct c2c_dimension *dimensions[] = {
&dim_tot_stores,
&dim_stores_l1hit,
&dim_stores_l1miss,
+ &dim_stores_na,
&dim_cl_stores_l1hit,
&dim_cl_stores_l1miss,
+ &dim_cl_stores_na,
&dim_ld_fbhit,
&dim_ld_l1hit,
&dim_ld_l2hit,
@@ -1632,6 +1693,7 @@ static struct c2c_dimension *dimensions[] = {
&dim_percent_lcl_hitm,
&dim_percent_stores_l1hit,
&dim_percent_stores_l1miss,
+ &dim_percent_stores_na,
&dim_dram_lcl,
&dim_dram_rmt,
&dim_pid,
@@ -2149,6 +2211,7 @@ static void print_c2c__display_stats(FILE *out)
fprintf(out, " Store - no mapping : %10d\n", stats->st_noadrs);
fprintf(out, " Store L1D Hit : %10d\n", stats->st_l1hit);
fprintf(out, " Store L1D Miss : %10d\n", stats->st_l1miss);
+ fprintf(out, " Store No available memory level : %10d\n", stats->st_na);
fprintf(out, " No Page Map Rejects : %10d\n", stats->nomap);
fprintf(out, " Unable to parse data source : %10d\n", stats->noparse);
}
@@ -2171,6 +2234,7 @@ static void print_shared_cacheline_info(FILE *out)
fprintf(out, " Blocked Access on shared lines : %10d\n", stats->blk_data + stats->blk_addr);
fprintf(out, " Store HITs on shared lines : %10d\n", stats->store);
fprintf(out, " Store L1D hits on shared lines : %10d\n", stats->st_l1hit);
+ fprintf(out, " Store No available memory level : %10d\n", stats->st_na);
fprintf(out, " Total Merged records : %10d\n", hitm_cnt + stats->store);
}
@@ -2193,10 +2257,10 @@ static void print_cacheline(struct c2c_hists *c2c_hists,
fprintf(out, "\n");
}
- fprintf(out, " -------------------------------------------------------------\n");
+ fprintf(out, " ----------------------------------------------------------------------\n");
__hist_entry__snprintf(he_cl, &hpp, hpp_list);
fprintf(out, "%s\n", bf);
- fprintf(out, " -------------------------------------------------------------\n");
+ fprintf(out, " ----------------------------------------------------------------------\n");
hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false);
}
@@ -2213,6 +2277,7 @@ static void print_pareto(FILE *out)
"cl_lcl_hitm,"
"cl_stores_l1hit,"
"cl_stores_l1miss,"
+ "cl_stores_na,"
"dcacheline";
perf_hpp_list__init(&hpp_list);
@@ -2664,6 +2729,7 @@ static int build_cl_output(char *cl_sort, bool no_source)
"percent_lcl_hitm,"
"percent_stores_l1hit,"
"percent_stores_l1miss,"
+ "percent_stores_na,"
"offset,offset_node,dcacheline_count,",
add_pid ? "pid," : "",
add_tid ? "tid," : "",
@@ -2735,9 +2801,7 @@ static int perf_c2c__report(int argc, const char **argv)
"the input file to process"),
OPT_INCR('N', "node-info", &c2c.node_info,
"show extra node info in report (repeat for more info)"),
-#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"),
-#endif
OPT_BOOLEAN(0, "stats", &c2c.stats_only,
"Display only statistic tables (implies --stdio)"),
OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full,
@@ -2767,6 +2831,10 @@ static int perf_c2c__report(int argc, const char **argv)
if (argc)
usage_with_options(report_c2c_usage, options);
+#ifndef HAVE_SLANG_SUPPORT
+ c2c.use_stdio = true;
+#endif
+
if (c2c.stats_only)
c2c.use_stdio = true;
@@ -2850,7 +2918,7 @@ static int perf_c2c__report(int argc, const char **argv)
"tot_recs,"
"tot_loads,"
"tot_stores,"
- "stores_l1hit,stores_l1miss,"
+ "stores_l1hit,stores_l1miss,stores_na,"
"ld_fbhit,ld_l1hit,ld_l2hit,"
"ld_lclhit,lcl_hitm,"
"ld_rmthit,rmt_hitm,"
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 5b50a4abf95f..54d4e508a092 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -27,6 +27,8 @@
#include "util/namespaces.h"
#include "util/util.h"
+#include <internal/lib.h>
+
#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
@@ -48,6 +50,7 @@ struct perf_inject {
bool in_place_update;
bool in_place_update_dry_run;
bool is_pipe;
+ bool copy_kcore_dir;
const char *input_name;
struct perf_data output;
u64 bytes_written;
@@ -55,6 +58,7 @@ struct perf_inject {
struct list_head samples;
struct itrace_synth_opts itrace_synth_opts;
char event_copy[PERF_SAMPLE_MAX_SIZE];
+ struct perf_file_section secs[HEADER_FEAT_BITS];
};
struct event_entry {
@@ -763,6 +767,135 @@ static int parse_vm_time_correlation(const struct option *opt, const char *str,
return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}
+static int save_section_info_cb(struct perf_file_section *section,
+ struct perf_header *ph __maybe_unused,
+ int feat, int fd __maybe_unused, void *data)
+{
+ struct perf_inject *inject = data;
+
+ inject->secs[feat] = *section;
+ return 0;
+}
+
+static int save_section_info(struct perf_inject *inject)
+{
+ struct perf_header *header = &inject->session->header;
+ int fd = perf_data__fd(inject->session->data);
+
+ return perf_header__process_sections(header, fd, inject, save_section_info_cb);
+}
+
+static bool keep_feat(int feat)
+{
+ switch (feat) {
+ /* Keep original information that describes the machine or software */
+ case HEADER_TRACING_DATA:
+ case HEADER_HOSTNAME:
+ case HEADER_OSRELEASE:
+ case HEADER_VERSION:
+ case HEADER_ARCH:
+ case HEADER_NRCPUS:
+ case HEADER_CPUDESC:
+ case HEADER_CPUID:
+ case HEADER_TOTAL_MEM:
+ case HEADER_CPU_TOPOLOGY:
+ case HEADER_NUMA_TOPOLOGY:
+ case HEADER_PMU_MAPPINGS:
+ case HEADER_CACHE:
+ case HEADER_MEM_TOPOLOGY:
+ case HEADER_CLOCKID:
+ case HEADER_BPF_PROG_INFO:
+ case HEADER_BPF_BTF:
+ case HEADER_CPU_PMU_CAPS:
+ case HEADER_CLOCK_DATA:
+ case HEADER_HYBRID_TOPOLOGY:
+ case HEADER_HYBRID_CPU_PMU_CAPS:
+ return true;
+ /* Information that can be updated */
+ case HEADER_BUILD_ID:
+ case HEADER_CMDLINE:
+ case HEADER_EVENT_DESC:
+ case HEADER_BRANCH_STACK:
+ case HEADER_GROUP_DESC:
+ case HEADER_AUXTRACE:
+ case HEADER_STAT:
+ case HEADER_SAMPLE_TIME:
+ case HEADER_DIR_FORMAT:
+ case HEADER_COMPRESSED:
+ default:
+ return false;
+	}
+}
+
+static int read_file(int fd, u64 offs, void *buf, size_t sz)
+{
+ ssize_t ret = preadn(fd, buf, sz, offs);
+
+ if (ret < 0)
+ return -errno;
+ if ((size_t)ret != sz)
+ return -EINVAL;
+ return 0;
+}
+
+static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
+{
+ int fd = perf_data__fd(inject->session->data);
+ u64 offs = inject->secs[feat].offset;
+ size_t sz = inject->secs[feat].size;
+ void *buf = malloc(sz);
+ int ret;
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = read_file(fd, offs, buf, sz);
+ if (ret)
+ goto out_free;
+
+ ret = fw->write(fw, buf, sz);
+out_free:
+ free(buf);
+ return ret;
+}
+
+struct inject_fc {
+ struct feat_copier fc;
+ struct perf_inject *inject;
+};
+
+static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
+{
+ struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
+ struct perf_inject *inject = inj_fc->inject;
+ int ret;
+
+ if (!inject->secs[feat].offset ||
+ !keep_feat(feat))
+ return 0;
+
+ ret = feat_copy(inject, feat, fw);
+ if (ret < 0)
+ return ret;
+
+ return 1; /* Feature section copied */
+}
+
+static int copy_kcore_dir(struct perf_inject *inject)
+{
+ char *cmd;
+ int ret;
+
+ ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
+ inject->input_name, inject->output.path);
+ if (ret < 0)
+ return ret;
+ pr_debug("%s\n", cmd);
+ ret = system(cmd);
+ free(cmd);
+ return ret;
+}
+
static int output_fd(struct perf_inject *inject)
{
return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
@@ -785,7 +918,7 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.tracing_data = perf_event__repipe_tracing_data;
}
- output_data_offset = session->header.data_offset;
+ output_data_offset = perf_session__data_offset(session->evlist);
if (inject->build_id_all) {
inject->tool.mmap = perf_event__repipe_buildid_mmap;
@@ -848,6 +981,11 @@ static int __cmd_inject(struct perf_inject *inject)
return ret;
if (!inject->is_pipe && !inject->in_place_update) {
+ struct inject_fc inj_fc = {
+ .fc.copy = feat_copy_cb,
+ .inject = inject,
+ };
+
if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
@@ -872,7 +1010,13 @@ static int __cmd_inject(struct perf_inject *inject)
}
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;
- perf_session__write_header(session, session->evlist, fd, true);
+ perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);
+
+ if (inject->copy_kcore_dir) {
+ ret = copy_kcore_dir(inject);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -1009,9 +1153,16 @@ int cmd_inject(int argc, const char **argv)
}
if (!inject.in_place_update_dry_run)
data.in_place_update = true;
- } else if (perf_data__open(&inject.output)) {
- perror("failed to create output file");
- return -1;
+ } else {
+ if (strcmp(inject.output.path, "-") && !inject.strip &&
+ has_kcore_dir(inject.input_name)) {
+ inject.output.is_dir = true;
+ inject.copy_kcore_dir = true;
+ }
+ if (perf_data__open(&inject.output)) {
+ perror("failed to create output file");
+ return -1;
+ }
}
data.path = inject.input_name;
@@ -1037,6 +1188,11 @@ int cmd_inject(int argc, const char **argv)
if (zstd_init(&(inject.session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed.\n");
+ /* Save original section info before feature bits change */
+ ret = save_section_info(&inject);
+ if (ret)
+ goto out_delete;
+
if (!data.is_pipe && inject.output.is_pipe) {
ret = perf_header__write_pipe(perf_data__fd(&inject.output));
if (ret < 0) {
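
feat_copy_cb() recovers its private state with the embedded-struct/container_of() idiom: struct inject_fc embeds the callback vtable (struct feat_copier) by value, so the callback can step back from the member pointer to the containing object. A freestanding sketch of the idiom, with names simplified and container_of() spelled out via offsetof():

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct feat_copier {
            int (*copy)(struct feat_copier *fc, int feat);
    };

    struct inject_fc {
            struct feat_copier fc;  /* embedded by value, not a pointer */
            int private_state;
    };

    static int copy_cb(struct feat_copier *fc, int feat)
    {
            struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);

            printf("feat %d, state %d\n", feat, inj_fc->private_state);
            return 1;       /* "feature section copied" */
    }

    int main(void)
    {
            struct inject_fc inj_fc = { .fc.copy = copy_cb, .private_state = 42 };

            return inj_fc.fc.copy(&inj_fc.fc, 7) == 1 ? 0 : 1;
    }
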
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 2fa687f73e5e..3696ae97f149 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1603,6 +1603,8 @@ int cmd_kvm(int argc, const char **argv)
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
+ OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
+ "Guest code can be found in hypervisor process"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_END()
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index cdfe1d4ced4b..23a33ac15e68 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -65,7 +65,7 @@ struct lock_stat {
u64 wait_time_min;
u64 wait_time_max;
- int discard; /* flag of blacklist */
+	int			broken; /* flag of broken lock sequence */
int combined;
};
@@ -118,6 +118,7 @@ struct thread_stat {
static struct rb_root thread_stats;
static bool combine_locks;
+static bool show_thread_stats;
static struct thread_stat *thread_stat_find(u32 tid)
{
@@ -384,9 +385,6 @@ static void combine_lock_stats(struct lock_stat *st)
ret = !!st->name - !!p->name;
if (ret == 0) {
- if (st->discard)
- goto out;
-
p->nr_acquired += st->nr_acquired;
p->nr_contended += st->nr_contended;
p->wait_time_total += st->wait_time_total;
@@ -399,10 +397,7 @@ static void combine_lock_stats(struct lock_stat *st)
if (p->wait_time_max < st->wait_time_max)
p->wait_time_max = st->wait_time_max;
- /* now it got a new !discard record */
- p->discard = 0;
-
-out:
+ p->broken |= st->broken;
st->combined = 1;
return;
}
@@ -415,15 +410,6 @@ out:
rb_link_node(&st->rb, parent, rb);
rb_insert_color(&st->rb, &sorted);
-
- if (st->discard) {
- st->nr_acquired = 0;
- st->nr_contended = 0;
- st->wait_time_total = 0;
- st->avg_wait_time = 0;
- st->wait_time_min = ULLONG_MAX;
- st->wait_time_max = 0;
- }
}
static void insert_to_result(struct lock_stat *st,
@@ -557,11 +543,13 @@ static int report_lock_acquire_event(struct evsel *evsel,
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
int flag = evsel__intval(evsel, sample, "flags");
+ /* abuse ls->addr for tid */
+ if (show_thread_stats)
+ addr = sample->tid;
+
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -599,9 +587,11 @@ static int report_lock_acquire_event(struct evsel *evsel,
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
broken:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_ACQUIRE]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_ACQUIRE]++;
+ }
list_del_init(&seq->list);
free(seq);
goto end;
@@ -626,11 +616,12 @@ static int report_lock_acquired_event(struct evsel *evsel,
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ if (show_thread_stats)
+ addr = sample->tid;
+
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -657,9 +648,11 @@ static int report_lock_acquired_event(struct evsel *evsel,
case SEQ_STATE_RELEASED:
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_ACQUIRED]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_ACQUIRED]++;
+ }
list_del_init(&seq->list);
free(seq);
goto end;
@@ -685,11 +678,12 @@ static int report_lock_contended_event(struct evsel *evsel,
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ if (show_thread_stats)
+ addr = sample->tid;
+
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -709,9 +703,11 @@ static int report_lock_contended_event(struct evsel *evsel,
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
case SEQ_STATE_CONTENDED:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_CONTENDED]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_CONTENDED]++;
+ }
list_del_init(&seq->list);
free(seq);
goto end;
@@ -737,11 +733,12 @@ static int report_lock_release_event(struct evsel *evsel,
const char *name = evsel__strval(evsel, sample, "name");
u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ if (show_thread_stats)
+ addr = sample->tid;
+
ls = lock_stat_findnew(addr, name);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -767,9 +764,11 @@ static int report_lock_release_event(struct evsel *evsel,
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
case SEQ_STATE_RELEASED:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_RELEASE]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_RELEASE]++;
+ }
goto free_seq;
default:
BUG_ON("Unknown state of lock sequence found!\n");
@@ -854,15 +853,26 @@ static void print_result(void)
bad = total = 0;
while ((st = pop_from_result())) {
total++;
- if (st->discard) {
+ if (st->broken)
bad++;
+ if (!st->nr_acquired)
continue;
- }
+
bzero(cut_name, 20);
if (strlen(st->name) < 20) {
/* output raw name */
- pr_info("%20s ", st->name);
+ const char *name = st->name;
+
+ if (show_thread_stats) {
+ struct thread *t;
+
+ /* st->addr contains tid of thread */
+ t = perf_session__findnew(session, st->addr);
+ name = thread__comm_str(t);
+ }
+
+ pr_info("%20s ", name);
} else {
strncpy(cut_name, st->name, 16);
cut_name[16] = '.';
@@ -1073,7 +1083,7 @@ out_delete:
static int __cmd_record(int argc, const char **argv)
{
const char *record_args[] = {
- "record", "-R", "-m", "1024", "-c", "1", "--synth", "no",
+ "record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
};
unsigned int rec_argc, i, j, ret;
const char **rec_argv;
@@ -1139,6 +1149,8 @@ int cmd_lock(int argc, const char **argv)
/* TODO: type */
OPT_BOOLEAN('c', "combine-locks", &combine_locks,
"combine locks in the same class"),
+ OPT_BOOLEAN('t', "threads", &show_thread_stats,
+ "show per-thread lock stats"),
OPT_PARENT(lock_options)
};
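
The discard to broken rename is a behavior change, not just naming: a broken lock sequence used to zero the lock's accumulated statistics and drop it from the report, whereas now the stats are kept and the lock is merely flagged, with bad_hist counting each lock at most once. A reduced sketch of the new semantics (types and names simplified from the patch):

    #include <stdbool.h>
    #include <stdio.h>

    struct lock_stat {
            unsigned nr_acquired;
            bool broken;
    };

    static unsigned bad_hist;

    static void mark_broken(struct lock_stat *ls)
    {
            if (!ls->broken) {      /* count each lock only once */
                    ls->broken = true;
                    bad_hist++;
            }
    }

    int main(void)
    {
            struct lock_stat ls = { .nr_acquired = 3 };

            mark_broken(&ls);
            mark_broken(&ls);
            /* stats survive: 3 acquisitions, 1 bad lock */
            printf("acquired=%u bad=%u\n", ls.nr_acquired, bad_hist);
            return 0;
    }
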
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 069825c48d40..9a71f0330137 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -49,6 +49,7 @@
#include "util/clockid.h"
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
+#include "util/off_cpu.h"
#include "asm/bug.h"
#include "perf.h"
#include "cputopo.h"
@@ -162,6 +163,7 @@ struct record {
bool buildid_mmap;
bool timestamp_filename;
bool timestamp_boundary;
+ bool off_cpu;
struct switch_output switch_output;
unsigned long long samples;
unsigned long output_max_size; /* = 0: unlimited */
@@ -869,7 +871,6 @@ static int record__auxtrace_init(struct record *rec __maybe_unused)
static int record__config_text_poke(struct evlist *evlist)
{
struct evsel *evsel;
- int err;
/* Nothing to do if text poke is already configured */
evlist__for_each_entry(evlist, evsel) {
@@ -877,32 +878,23 @@ static int record__config_text_poke(struct evlist *evlist)
return 0;
}
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
-
- evsel = evlist__last(evlist);
+ evsel = evlist__add_dummy_on_all_cpus(evlist);
+ if (!evsel)
+ return -ENOMEM;
- evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = 1;
evsel->core.attr.text_poke = 1;
evsel->core.attr.ksymbol = 1;
-
- evsel->core.system_wide = true;
- evsel->no_aux_samples = true;
evsel->immediate = true;
-
- /* Text poke must be collected on all CPUs */
- perf_cpu_map__put(evsel->core.own_cpus);
- evsel->core.own_cpus = perf_cpu_map__new(NULL);
- perf_cpu_map__put(evsel->core.cpus);
- evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
-
evsel__set_sample_bit(evsel, TIME);
return 0;
}
+static int record__config_off_cpu(struct record *rec)
+{
+ return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
+}
+
static bool record__kcore_readable(struct machine *machine)
{
char kcore[PATH_MAX];
@@ -982,14 +974,20 @@ static void record__thread_data_close_pipes(struct record_thread *thread_data)
}
}
+static bool evlist__per_thread(struct evlist *evlist)
+{
+ return cpu_map__is_dummy(evlist->core.user_requested_cpus);
+}
+
static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
{
int m, tm, nr_mmaps = evlist->core.nr_mmaps;
struct mmap *mmap = evlist->mmap;
struct mmap *overwrite_mmap = evlist->overwrite_mmap;
- struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
+ struct perf_cpu_map *cpus = evlist->core.all_cpus;
+ bool per_thread = evlist__per_thread(evlist);
- if (cpu_map__is_dummy(cpus))
+ if (per_thread)
thread_data->nr_mmaps = nr_mmaps;
else
thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
@@ -1010,8 +1008,8 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
- if (cpu_map__is_dummy(cpus) ||
- test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
+ if (per_thread ||
+ test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
if (thread_data->maps) {
thread_data->maps[tm] = &mmap[m];
pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
@@ -1885,7 +1883,7 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
- err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize cpu map.\n");
@@ -2600,6 +2598,9 @@ out_free_threads:
} else
status = err;
+ if (rec->off_cpu)
+ rec->bytes_written += off_cpu_write(rec->session);
+
record__synthesize(rec, true);
/* this will be recalculated during process_buildids() */
rec->samples = 0;
@@ -3324,6 +3325,7 @@ static struct option __record_options[] = {
OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
"write collected trace data into several data files using parallel threads",
record__parse_threads),
+ OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
OPT_END()
};
@@ -3331,13 +3333,14 @@ struct option *record_options = __record_options;
static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
{
- int c;
+ struct perf_cpu cpu;
+ int idx;
if (cpu_map__is_dummy(cpus))
return;
- for (c = 0; c < cpus->nr; c++)
- set_bit(cpus->map[c].cpu, mask->bits);
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ set_bit(cpu.cpu, mask->bits);
}
static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
@@ -3404,8 +3407,8 @@ static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map
pr_debug("nr_threads: %d\n", rec->nr_threads);
for (t = 0; t < rec->nr_threads; t++) {
- set_bit(cpus->map[t].cpu, rec->thread_masks[t].maps.bits);
- set_bit(cpus->map[t].cpu, rec->thread_masks[t].affinity.bits);
+ set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
+ set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
if (verbose) {
pr_debug("thread_masks[%d]: ", t);
mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
@@ -3682,12 +3685,12 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
static int record__init_thread_masks(struct record *rec)
{
int ret = 0;
- struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;
+ struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
if (!record__threads_enabled(rec))
return record__init_thread_default_masks(rec, cpus);
- if (cpu_map__is_dummy(cpus)) {
+ if (evlist__per_thread(rec->evlist)) {
pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
return -EINVAL;
}
@@ -3744,6 +3747,12 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+#ifndef HAVE_BPF_SKEL
+# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
+ set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
+# undef set_nobuild
+#endif
+
rec->opts.affinity = PERF_AFFINITY_SYS;
rec->evlist = evlist__new();
@@ -3980,6 +3989,14 @@ int cmd_record(int argc, const char **argv)
}
}
+ if (rec->off_cpu) {
+ err = record__config_off_cpu(rec);
+ if (err) {
+ pr_err("record__config_off_cpu failed, error %d\n", err);
+ goto out;
+ }
+ }
+
if (record_opts__config(&rec->opts)) {
err = -EINVAL;
goto out;
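
Several hunks here replace direct cpus->map[] indexing with perf_cpu_map accessors and key per-thread mode off the dummy CPU map (a single entry with cpu == -1), which is what the new evlist__per_thread() helper checks. A toy version of that check, with the struct a simplified stand-in for perf's perf_cpu_map:

    #include <stdbool.h>
    #include <stdio.h>

    struct cpu { int cpu; };
    struct cpu_map { int nr; struct cpu map[4]; };

    /* A "dummy" map marks per-thread (no CPU affinity) recording. */
    static bool cpu_map__is_dummy(const struct cpu_map *cpus)
    {
            return cpus->nr == 1 && cpus->map[0].cpu == -1;
    }

    int main(void)
    {
            struct cpu_map per_thread = { .nr = 1, .map = { { -1 } } };
            struct cpu_map per_cpu = { .nr = 2, .map = { { 0 }, { 1 } } };

            printf("%d %d\n", cpu_map__is_dummy(&per_thread),
                   cpu_map__is_dummy(&per_cpu));        /* 1 0 */
            return 0;
    }
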
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index cf5eab5431b4..c689054002cc 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1742,16 +1742,44 @@ static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
return perf_sample__fprintf_spacing(len, 34, fp);
}
+/* Check whether a value contains only printable ASCII characters padded with NULs */
+static bool ptw_is_prt(u64 val)
+{
+ char c;
+ u32 i;
+
+ for (i = 0; i < sizeof(val); i++) {
+ c = ((char *)&val)[i];
+ if (!c)
+ break;
+ if (!isprint(c) || !isascii(c))
+ return false;
+ }
+ for (; i < sizeof(val); i++) {
+ c = ((char *)&val)[i];
+ if (c)
+ return false;
+ }
+ return true;
+}
+
static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
+ char str[sizeof(u64) + 1] = "";
int len;
+ u64 val;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
- len = fprintf(fp, " IP: %u payload: %#" PRIx64 " ",
- data->ip, le64_to_cpu(data->payload));
+ val = le64_to_cpu(data->payload);
+ if (ptw_is_prt(val)) {
+ memcpy(str, &val, sizeof(val));
+ str[sizeof(val)] = 0;
+ }
+ len = fprintf(fp, " IP: %u payload: %#" PRIx64 " %s ",
+ data->ip, val, str);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
@@ -3884,6 +3912,8 @@ int cmd_script(int argc, const char **argv)
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
+ OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
+ "Guest code can be found in hypervisor process"),
OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
"Enable LBR callgraph stitching approach"),
OPTS_EVSWITCH(&script.evswitch),
@@ -3909,7 +3939,8 @@ int cmd_script(int argc, const char **argv)
if (symbol_conf.guestmount ||
symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_kallsyms ||
- symbol_conf.default_guest_modules) {
+ symbol_conf.default_guest_modules ||
+ symbol_conf.guest_code) {
/*
* Enable guest sample processing.
*/
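
ptw_is_prt() treats the 8-byte PTWRITE payload as a candidate string: printable ASCII from the first byte, then NUL padding to the end, in which case the payload is also printed as text. A self-contained version of the same check (an unsigned-char cast is added so isprint() never sees a negative char):

    #include <ctype.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static bool ptw_is_prt(uint64_t val)
    {
            char c;
            unsigned int i;

            for (i = 0; i < sizeof(val); i++) {
                    c = ((char *)&val)[i];
                    if (!c)
                            break;  /* start of the NUL padding */
                    if (!isprint((unsigned char)c) || !isascii(c))
                            return false;
            }
            for (; i < sizeof(val); i++) {
                    if (((char *)&val)[i])
                            return false;   /* padding must run to the end */
            }
            return true;
    }

    int main(void)
    {
            uint64_t val = 0;

            memcpy(&val, "Hi", 2);                  /* "Hi" + six NULs */
            printf("%d\n", ptw_is_prt(val));        /* 1 */
            printf("%d\n", ptw_is_prt(~0ULL));      /* 0: 0xff is not ASCII */
            return 0;
    }
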
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a96f106dc93a..d2ecd4d29624 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -216,6 +216,7 @@ static struct perf_stat_config stat_config = {
.run_count = 1,
.metric_only_len = METRIC_ONLY_LEN,
.walltime_nsecs_stats = &walltime_nsecs_stats,
+ .ru_stats = &ru_stats,
.big_num = true,
.ctl_fd = -1,
.ctl_fd_ack = -1,
@@ -271,11 +272,8 @@ static void evlist__check_cpu_maps(struct evlist *evlist)
pr_warning(" %s: %s\n", evsel->name, buf);
}
- for_each_group_evsel(pos, leader) {
- evsel__set_leader(pos, pos);
- pos->core.nr_members = 0;
- }
- evsel->core.leader->nr_members = 0;
+ for_each_group_evsel(pos, leader)
+ evsel__remove_from_group(pos, leader);
}
}
@@ -341,15 +339,35 @@ static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 t
static int read_single_counter(struct evsel *counter, int cpu_map_idx,
int thread, struct timespec *rs)
{
- if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
- u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
- struct perf_counts_values *count =
- perf_counts(counter->counts, cpu_map_idx, thread);
- count->ena = count->run = val;
- count->val = val;
- return 0;
+	switch (counter->tool_event) {
+ case PERF_TOOL_DURATION_TIME: {
+ u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
+ struct perf_counts_values *count =
+ perf_counts(counter->counts, cpu_map_idx, thread);
+ count->ena = count->run = val;
+ count->val = val;
+ return 0;
+ }
+ case PERF_TOOL_USER_TIME:
+ case PERF_TOOL_SYSTEM_TIME: {
+ u64 val;
+ struct perf_counts_values *count =
+ perf_counts(counter->counts, cpu_map_idx, thread);
+ if (counter->tool_event == PERF_TOOL_USER_TIME)
+ val = ru_stats.ru_utime_usec_stat.mean;
+ else
+ val = ru_stats.ru_stime_usec_stat.mean;
+ count->ena = count->run = val;
+ count->val = val;
+ return 0;
+ }
+ default:
+ case PERF_TOOL_NONE:
+ return evsel__read_counter(counter, cpu_map_idx, thread);
+ case PERF_TOOL_MAX:
+ /* This should never be reached */
+ return 0;
}
- return evsel__read_counter(counter, cpu_map_idx, thread);
}
/*
@@ -364,9 +382,6 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_
if (!counter->supported)
return -ENOENT;
- if (counter->core.system_wide)
- nthreads = 1;
-
for (thread = 0; thread < nthreads; thread++) {
struct perf_counts_values *count;
@@ -1010,8 +1025,10 @@ try_again_reset:
evlist__reset_prev_raw_counts(evsel_list);
runtime_stat_reset(&stat_config);
perf_stat__reset_shadow_per_stat(&rt_stat);
- } else
+ } else {
update_stats(&walltime_nsecs_stats, t1 - t0);
+ update_rusage_stats(&ru_stats, &stat_config.ru_data);
+ }
/*
* Closing a group leader splits the group, and as we only disable
@@ -1235,6 +1252,8 @@ static struct option stat_options[] = {
OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
+ OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
+ "Merge identical named hybrid events"),
OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
"print counts with custom separator"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
@@ -1842,11 +1861,23 @@ static int add_default_attributes(void)
unsigned int max_level = 1;
char *str = NULL;
bool warn = false;
+ const char *pmu_name = "cpu";
if (!force_metric_only)
stat_config.metric_only = true;
- if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) {
+ if (perf_pmu__has_hybrid()) {
+ if (!evsel_list->hybrid_pmu_name) {
+ pr_warning("WARNING: default to use cpu_core topdown events\n");
+ evsel_list->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
+ }
+
+ pmu_name = evsel_list->hybrid_pmu_name;
+ if (!pmu_name)
+ return -1;
+ }
+
+ if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) {
metric_attrs = topdown_metric_L2_attrs;
max_level = 2;
}
@@ -1857,10 +1888,11 @@ static int add_default_attributes(void)
} else if (!stat_config.topdown_level)
stat_config.topdown_level = max_level;
- if (topdown_filter_events(metric_attrs, &str, 1) < 0) {
+ if (topdown_filter_events(metric_attrs, &str, 1, pmu_name) < 0) {
pr_err("Out of memory\n");
return -1;
}
+
if (metric_attrs[0] && str) {
if (!stat_config.interval && !stat_config.metric_only) {
fprintf(stat_config.output,
@@ -1884,10 +1916,12 @@ static int add_default_attributes(void)
}
if (topdown_filter_events(topdown_attrs, &str,
- arch_topdown_check_group(&warn)) < 0) {
+ arch_topdown_check_group(&warn),
+ pmu_name) < 0) {
pr_err("Out of memory\n");
return -1;
}
+
if (topdown_attrs[0] && str) {
struct parse_events_error errinfo;
if (warn)
@@ -2224,7 +2258,7 @@ static void setup_system_wide(int forks)
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
- if (!counter->core.system_wide &&
+ if (!counter->core.requires_cpu &&
strcmp(counter->name, "duration_time")) {
return;
}
@@ -2552,6 +2586,8 @@ int cmd_stat(int argc, const char **argv)
if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
goto out;
+ /* Enable ignoring missing threads when -p option is defined. */
+ evlist__first(evsel_list)->ignore_missing_thread = target.pid;
status = 0;
for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
if (stat_config.run_count != 1 && verbose > 0)
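
The new PERF_TOOL_USER_TIME and PERF_TOOL_SYSTEM_TIME events are not read from the kernel at all: update_rusage_stats() folds getrusage() data for the workload into ru_stats, and read_single_counter() reports the mean. A sketch of the underlying measurement, with the stats machinery reduced to a plain getrusage() read and a printf:

    #include <stdio.h>
    #include <sys/resource.h>
    #include <sys/time.h>

    static unsigned long long tv_to_usec(const struct timeval *tv)
    {
            return tv->tv_sec * 1000000ULL + tv->tv_usec;
    }

    int main(void)
    {
            struct rusage ru;

            /* ... fork, run, and wait for the workload here ... */
            if (getrusage(RUSAGE_CHILDREN, &ru))
                    return 1;

            printf("user: %llu usec, system: %llu usec\n",
                   tv_to_usec(&ru.ru_utime), tv_to_usec(&ru.ru_stime));
            return 0;
    }
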
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 897fc504918b..f075cf37a65e 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -4280,6 +4280,7 @@ static int trace__replay(struct trace *trace)
goto out;
evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
+ trace->syscalls.events.sys_enter = evsel;
/* older kernels have syscalls tp versus raw_syscalls */
if (evsel == NULL)
evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
@@ -4292,6 +4293,7 @@ static int trace__replay(struct trace *trace)
}
evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
+ trace->syscalls.events.sys_exit = evsel;
if (evsel == NULL)
evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
if (evsel &&
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 9cd074a3d825..a71f491224da 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -65,6 +65,7 @@ static void library_status(void)
#endif
STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table);
STATUS(HAVE_LIBBFD_SUPPORT, libbfd);
+ STATUS(HAVE_DEBUGINFOD_SUPPORT, debuginfod);
STATUS(HAVE_LIBELF_SUPPORT, libelf);
STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
STATUS(HAVE_LIBNUMA_SUPPORT, numa_num_possible_cpus);
diff --git a/tools/perf/perf-with-kcore.sh b/tools/perf/perf-with-kcore.sh
deleted file mode 100644
index 0b96545c8184..000000000000
--- a/tools/perf/perf-with-kcore.sh
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0-only
-# perf-with-kcore: use perf with a copy of kcore
-# Copyright (c) 2014, Intel Corporation.
-#
-
-set -e
-
-usage()
-{
- echo "Usage: perf-with-kcore <perf sub-command> <perf.data directory> [<sub-command options> [ -- <workload>]]" >&2
- echo " <perf sub-command> can be record, script, report or inject" >&2
- echo " or: perf-with-kcore fix_buildid_cache_permissions" >&2
- exit 1
-}
-
-find_perf()
-{
- if [ -n "$PERF" ] ; then
- return
- fi
- PERF=`which perf || true`
- if [ -z "$PERF" ] ; then
- echo "Failed to find perf" >&2
- exit 1
- fi
- if [ ! -x "$PERF" ] ; then
- echo "Failed to find perf" >&2
- exit 1
- fi
- echo "Using $PERF"
- "$PERF" version
-}
-
-copy_kcore()
-{
- echo "Copying kcore"
-
- if [ $EUID -eq 0 ] ; then
- SUDO=""
- else
- SUDO="sudo"
- fi
-
- rm -f perf.data.junk
- ("$PERF" record -o perf.data.junk "${PERF_OPTIONS[@]}" -- sleep 60) >/dev/null 2>/dev/null &
- PERF_PID=$!
-
- # Need to make sure that perf has started
- sleep 1
-
- KCORE=$(($SUDO "$PERF" buildid-cache -v -f -k /proc/kcore >/dev/null) 2>&1)
- case "$KCORE" in
- "kcore added to build-id cache directory "*)
- KCORE_DIR=${KCORE#"kcore added to build-id cache directory "}
- ;;
- *)
- kill $PERF_PID
- wait >/dev/null 2>/dev/null || true
- rm perf.data.junk
- echo "$KCORE"
- echo "Failed to find kcore" >&2
- exit 1
- ;;
- esac
-
- kill $PERF_PID
- wait >/dev/null 2>/dev/null || true
- rm perf.data.junk
-
- $SUDO cp -a "$KCORE_DIR" "$(pwd)/$PERF_DATA_DIR"
- $SUDO rm -f "$KCORE_DIR/kcore"
- $SUDO rm -f "$KCORE_DIR/kallsyms"
- $SUDO rm -f "$KCORE_DIR/modules"
- $SUDO rmdir "$KCORE_DIR"
-
- KCORE_DIR_BASENAME=$(basename "$KCORE_DIR")
- KCORE_DIR="$(pwd)/$PERF_DATA_DIR/$KCORE_DIR_BASENAME"
-
- $SUDO chown $UID "$KCORE_DIR"
- $SUDO chown $UID "$KCORE_DIR/kcore"
- $SUDO chown $UID "$KCORE_DIR/kallsyms"
- $SUDO chown $UID "$KCORE_DIR/modules"
-
- $SUDO chgrp $GROUPS "$KCORE_DIR"
- $SUDO chgrp $GROUPS "$KCORE_DIR/kcore"
- $SUDO chgrp $GROUPS "$KCORE_DIR/kallsyms"
- $SUDO chgrp $GROUPS "$KCORE_DIR/modules"
-
- ln -s "$KCORE_DIR_BASENAME" "$PERF_DATA_DIR/kcore_dir"
-}
-
-fix_buildid_cache_permissions()
-{
- if [ $EUID -ne 0 ] ; then
- echo "This script must be run as root via sudo " >&2
- exit 1
- fi
-
- if [ -z "$SUDO_USER" ] ; then
- echo "This script must be run via sudo" >&2
- exit 1
- fi
-
- USER_HOME=$(bash <<< "echo ~$SUDO_USER")
-
- echo "Fixing buildid cache permissions"
-
- find "$USER_HOME/.debug" -xdev -type d ! -user "$SUDO_USER" -ls -exec chown "$SUDO_USER" \{\} \;
- find "$USER_HOME/.debug" -xdev -type f -links 1 ! -user "$SUDO_USER" -ls -exec chown "$SUDO_USER" \{\} \;
- find "$USER_HOME/.debug" -xdev -type l ! -user "$SUDO_USER" -ls -exec chown -h "$SUDO_USER" \{\} \;
-
- if [ -n "$SUDO_GID" ] ; then
- find "$USER_HOME/.debug" -xdev -type d ! -group "$SUDO_GID" -ls -exec chgrp "$SUDO_GID" \{\} \;
- find "$USER_HOME/.debug" -xdev -type f -links 1 ! -group "$SUDO_GID" -ls -exec chgrp "$SUDO_GID" \{\} \;
- find "$USER_HOME/.debug" -xdev -type l ! -group "$SUDO_GID" -ls -exec chgrp -h "$SUDO_GID" \{\} \;
- fi
-
- echo "Done"
-}
-
-check_buildid_cache_permissions()
-{
- if [ $EUID -eq 0 ] ; then
- return
- fi
-
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type d ! -user "$USER" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type f -links 1 ! -user "$USER" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type l ! -user "$USER" -print -quit)
-
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type d ! -group "$GROUPS" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type f -links 1 ! -group "$GROUPS" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type l ! -group "$GROUPS" -print -quit)
-
- if [ -n "$PERMISSIONS_OK" ] ; then
- echo "*** WARNING *** buildid cache permissions may need fixing" >&2
- fi
-}
-
-record()
-{
- echo "Recording"
-
- if [ $EUID -ne 0 ] ; then
-
- if [ "$(cat /proc/sys/kernel/kptr_restrict)" -ne 0 ] ; then
- echo "*** WARNING *** /proc/sys/kernel/kptr_restrict prevents access to kernel addresses" >&2
- fi
-
- if echo "${PERF_OPTIONS[@]}" | grep -q ' -a \|^-a \| -a$\|^-a$\| --all-cpus \|^--all-cpus \| --all-cpus$\|^--all-cpus$' ; then
- echo "*** WARNING *** system-wide tracing without root access will not be able to read all necessary information from /proc" >&2
- fi
-
- if echo "${PERF_OPTIONS[@]}" | grep -q 'intel_pt\|intel_bts\| -I\|^-I' ; then
- if [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt -1 ] ; then
- echo "*** WARNING *** /proc/sys/kernel/perf_event_paranoid restricts buffer size and tracepoint (sched_switch) use" >&2
- fi
-
- if echo "${PERF_OPTIONS[@]}" | grep -q ' --per-thread \|^--per-thread \| --per-thread$\|^--per-thread$' ; then
- true
- elif echo "${PERF_OPTIONS[@]}" | grep -q ' -t \|^-t \| -t$\|^-t$' ; then
- true
- elif [ ! -r /sys/kernel/debug -o ! -x /sys/kernel/debug ] ; then
- echo "*** WARNING *** /sys/kernel/debug permissions prevent tracepoint (sched_switch) use" >&2
- fi
- fi
- fi
-
- if [ -z "$1" ] ; then
- echo "Workload is required for recording" >&2
- usage
- fi
-
- if [ -e "$PERF_DATA_DIR" ] ; then
- echo "'$PERF_DATA_DIR' exists" >&2
- exit 1
- fi
-
- find_perf
-
- mkdir "$PERF_DATA_DIR"
-
- echo "$PERF record -o $PERF_DATA_DIR/perf.data ${PERF_OPTIONS[@]} -- $@"
- "$PERF" record -o "$PERF_DATA_DIR/perf.data" "${PERF_OPTIONS[@]}" -- "$@" || true
-
- if rmdir "$PERF_DATA_DIR" > /dev/null 2>/dev/null ; then
- exit 1
- fi
-
- copy_kcore
-
- echo "Done"
-}
-
-subcommand()
-{
- find_perf
- check_buildid_cache_permissions
- echo "$PERF $PERF_SUB_COMMAND -i $PERF_DATA_DIR/perf.data --kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms $@"
- "$PERF" $PERF_SUB_COMMAND -i "$PERF_DATA_DIR/perf.data" "--kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms" "$@"
-}
-
-if [ "$1" = "fix_buildid_cache_permissions" ] ; then
- fix_buildid_cache_permissions
- exit 0
-fi
-
-PERF_SUB_COMMAND=$1
-PERF_DATA_DIR=$2
-shift || true
-shift || true
-
-if [ -z "$PERF_SUB_COMMAND" ] ; then
- usage
-fi
-
-if [ -z "$PERF_DATA_DIR" ] ; then
- usage
-fi
-
-case "$PERF_SUB_COMMAND" in
-"record")
- while [ "$1" != "--" ] ; do
- PERF_OPTIONS+=("$1")
- shift || break
- done
- if [ "$1" != "--" ] ; then
- echo "Options and workload are required for recording" >&2
- usage
- fi
- shift
- record "$@"
-;;
-"script")
- subcommand "$@"
-;;
-"report")
- subcommand "$@"
-;;
-"inject")
- subcommand "$@"
-;;
-*)
- usage
-;;
-esac
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
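
These per-CPU JSON files mostly reference shared event definitions by name: an "ArchStdEvent" entry pulls the event code, name, and description from the common arm64 event list (common-and-microarch.json in this tree) instead of repeating them, while CPU-specific events such as the Cortex-A510 BR_COND_PRED below carry their own EventCode. At build time jevents flattens everything into C lookup tables; a simplified stand-in (field set reduced; 0x10 and 0x12 are the architectural BR_MIS_PRED/BR_PRED encodings):

    #include <stdio.h>
    #include <string.h>

    /* Reduced version of the jevents-generated pmu_event table entry. */
    struct pmu_event {
            const char *name;
            const char *event;
            const char *desc;
    };

    static const struct pmu_event cortex_a34_events[] = {
            { "BR_MIS_PRED", "event=0x10", "Mispredicted or not predicted branch" },
            { "BR_PRED", "event=0x12", "Predictable branch speculatively executed" },
            { NULL, NULL, NULL },
    };

    static const struct pmu_event *find_event(const char *name)
    {
            for (const struct pmu_event *e = cortex_a34_events; e->name; e++)
                    if (!strcmp(e->name, name))
                            return e;
            return NULL;
    }

    int main(void)
    {
            const struct pmu_event *e = find_event("BR_PRED");

            if (e)
                    printf("%s -> %s\n", e->name, e->event);
            return 0;
    }
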
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json
new file mode 100644
index 000000000000..8a9a95e05c32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json
new file mode 100644
index 000000000000..7c018f439206
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json
new file mode 100644
index 000000000000..2c319f936957
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json
new file mode 100644
index 000000000000..8a9a95e05c32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json
new file mode 100644
index 000000000000..df9f94cfc8d5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json
@@ -0,0 +1,44 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json
new file mode 100644
index 000000000000..2c319f936957
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json
new file mode 100644
index 000000000000..411fcbdbd7e6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json
@@ -0,0 +1,59 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "PublicDescription": "Predicted conditional branch executed. This event counts when any branch that the conditional predictor can predict is retired. This event still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off",
+ "EventCode": "0xC9",
+ "EventName": "BR_COND_PRED",
+ "BriefDescription": "Predicted conditional branch executed. This event counts when any branch that the conditional predictor can predict is retired. This event still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mispredicted. This event counts when any indirect branch that the Branch Target Address Cache (BTAC) can predict is retired and has mispredicted either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCA",
+ "EventName": "BR_INDIRECT_MIS_PRED",
+ "BriefDescription": "Indirect branch mispredicted. This event counts when any indirect branch that the Branch Target Address Cache (BTAC) can predict is retired and has mispredicted either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mispredicted due to address miscompare. This event counts when any indirect branch that the BTAC can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCB",
+ "EventName": "BR_INDIRECT_ADDR_MIS_PRED",
+ "BriefDescription": "Indirect branch mispredicted due to address miscompare. This event counts when any indirect branch that the BTAC can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Conditional branch mispredicted. This event counts when any branch that the conditional predictor can predict is retired and has mispredicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches that correctly predict the condition but mispredict the address do not count",
+ "EventCode": "0xCC",
+ "EventName": "BR_COND_MIS_PRED",
+ "BriefDescription": "Conditional branch mispredicted. This event counts when any branch that the conditional predictor can predict is retired and has mispredicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches that correctly predict the condition but mispredict the address do not count"
+ },
+ {
+ "PublicDescription": "Indirect branch with predicted address executed. This event counts when any indirect branch that the BTAC can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCD",
+ "EventName": "BR_INDIRECT_ADDR_PRED",
+ "BriefDescription": "Indirect branch with predicted address executed. This event counts when any indirect branch that the BTAC can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return with predicted address executed. This event counts when any procedure return that the call-return stack can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCE",
+ "EventName": "BR_RETURN_ADDR_PRED",
+ "BriefDescription": "Procedure return with predicted address executed. This event counts when any procedure return that the call-return stack can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return mispredicted due to address miscompare. This event counts when any procedure return that the call-return stack can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCF",
+ "EventName": "BR_RETURN_ADDR_MIS_PRED",
+ "BriefDescription": "Procedure return mispredicted due to address miscompare. This event counts when any procedure return that the call-return stack can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json
new file mode 100644
index 000000000000..27cd913e186b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json
@@ -0,0 +1,182 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "L2 cache refill due to prefetch. If the complex is configured with a per-complex L2 cache, this event does not count. If the complex is configured without a per-complex L2 cache, this event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. If neither a per-complex cache or a cluster cache is configured, this event is not implemented",
+ "EventCode": "0xC1",
+ "EventName": "L2D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "L2 cache refill due to prefetch. If the complex is configured with a per-complex L2 cache, this event does not count. If the complex is configured without a per-complex L2 cache, this event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. If neither a per-complex cache or a cluster cache is configured, this event is not implemented"
+ },
+ {
+ "PublicDescription": "L1 data cache refill due to prefetch. This event counts any linefills from the prefetcher that cause an allocation into the L1 data cache",
+ "EventCode": "0xC2",
+ "EventName": "L1D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "L1 data cache refill due to prefetch. This event counts any linefills from the prefetcher that cause an allocation into the L1 data cache"
+ },
+ {
+ "PublicDescription": "L2 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L2 cache",
+ "EventCode": "0xC3",
+ "EventName": "L2D_WS_MODE",
+ "BriefDescription": "L2 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L2 cache"
+ },
+ {
+ "PublicDescription": "L1 data cache entering write streaming mode. This event counts for each entry into write streaming mode",
+ "EventCode": "0xC4",
+ "EventName": "L1D_WS_MODE_ENTRY",
+ "BriefDescription": "L1 data cache entering write streaming mode. This event counts for each entry into write streaming mode"
+ },
+ {
+ "PublicDescription": "L1 data cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L1 data cache",
+ "EventCode": "0xC5",
+ "EventName": "L1D_WS_MODE",
+ "BriefDescription": "L1 data cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L1 data cache"
+ },
+ {
+ "PublicDescription": "L3 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L3 cache",
+ "EventCode": "0xC7",
+ "EventName": "L3D_WS_MODE",
+ "BriefDescription": "L3 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L3 cache"
+ },
+ {
+ "PublicDescription": "Last level cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the system cache",
+ "EventCode": "0xC8",
+ "EventName": "LL_WS_MODE",
+ "BriefDescription": "Last level cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the system cache"
+ },
+ {
+ "PublicDescription": "L2 TLB walk cache access. This event does not count if the MMU is disabled",
+ "EventCode": "0xD0",
+ "EventName": "L2D_WALK_TLB",
+ "BriefDescription": "L2 TLB walk cache access. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "L2 TLB walk cache refill. This event does not count if the MMU is disabled",
+ "EventCode": "0xD1",
+ "EventName": "L2D_WALK_TLB_REFILL",
+ "BriefDescription": "L2 TLB walk cache refill. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "L2 TLB IPA cache access. This event counts on each access to the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access is counted. If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD4",
+ "EventName": "L2D_S2_TLB",
+ "BriefDescription": "L2 TLB IPA cache access. This event counts on each access to the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access is counted. If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "L2 TLB IPA cache refill. This event counts on each refill of the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access that causes a refill is counted. If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD5",
+ "EventName": "L2D_S2_TLB_REFILL",
+ "BriefDescription": "L2 TLB IPA cache refill. This event counts on each refill of the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access that causes a refill is counted. If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "L2 cache stash dropped. This event counts on each stash request that is received from the interconnect or the Accelerator Coherency Port (ACP), that targets L2 cache and is dropped due to lack of buffer space to hold the request",
+ "EventCode": "0xD6",
+ "EventName": "L2D_CACHE_STASH_DROPPED",
+ "BriefDescription": "L2 cache stash dropped. This event counts on each stash request that is received from the interconnect or the Accelerator Coherency Port (ACP), that targets L2 cache and is dropped due to lack of buffer space to hold the request"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
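
The IMPDEF events above (0xC1-0xD6) reach perf through these JSON names once the event tables are compiled in, but nothing more is needed to count them: the hex event codes can be programmed directly as raw perf events. Below is a minimal sketch counting one of them, L1D_CACHE_REFILL_PREFETCH (0xC2 from the table above), for the calling thread. Note the assumption that PERF_TYPE_RAW resolves to the core PMU, which holds on homogeneous systems; big.LITTLE parts would instead need the dynamic PMU type id from /sys/bus/event_source/devices/<pmu>/type.

/* Count the IMPDEF event 0xC2 (L1D_CACHE_REFILL_PREFETCH, from the JSON
 * above) for the current thread via perf_event_open(2). PERF_TYPE_RAW is
 * assumed to resolve to the core PMU; heterogeneous systems need the
 * per-PMU type id from sysfs instead.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0xC2;             /* L1D_CACHE_REFILL_PREFETCH */
        attr.disabled = 1;              /* start stopped, enable explicitly */
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement runs here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("L1D_CACHE_REFILL_PREFETCH: %llu\n",
                       (unsigned long long)count);
        close(fd);
        return 0;
}

The same pattern applies to any of the 0xCx/0xDx codes above; only attr.config changes.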
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json
new file mode 100644
index 000000000000..3039d03412df
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json
@@ -0,0 +1,95 @@
+[
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json
new file mode 100644
index 000000000000..38f459502514
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json
new file mode 100644
index 000000000000..325daaa7b809
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json
@@ -0,0 +1,107 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, cache miss. This event counts every cycle that the Data Processing Unit (DPU) instruction queue is empty and there is an instruction cache miss being processed",
+ "EventCode": "0xE1",
+ "EventName": "STALL_FRONTEND_CACHE",
+ "BriefDescription": "No operation issued due to the frontend, cache miss. This event counts every cycle that the Data Processing Unit (DPU) instruction queue is empty and there is an instruction cache miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, TLB miss. This event counts every cycle that the DPU instruction queue is empty and there is an instruction L1 TLB miss being processed",
+ "EventCode": "0xE2",
+ "EventName": "STALL_FRONTEND_TLB",
+ "BriefDescription": "No operation issued due to the frontend, TLB miss. This event counts every cycle that the DPU instruction queue is empty and there is an instruction L1 TLB miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, pre-decode error",
+ "EventCode": "0xE3",
+ "EventName": "STALL_FRONTEND_PDERR",
+ "BriefDescription": "No operation issued due to the frontend, pre-decode error"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock. Stall cycles due to a stall in the Wr stage are excluded",
+ "EventCode": "0xE4",
+ "EventName": "STALL_BACKEND_ILOCK",
+ "BriefDescription": "No operation issued due to the backend interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock. Stall cycles due to a stall in the Wr stage are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, address interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock on an address operand. This type of interlock is caused by a load/store instruction waiting for data to calculate the address. Stall cycles due to a stall in the Wr stage are excluded",
+ "EventCode": "0xE5",
+ "EventName": "STALL_BACKEND_ILOCK_ADDR",
+ "BriefDescription": "No operation issued due to the backend, address interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock on an address operand. This type of interlock is caused by a load/store instruction waiting for data to calculate the address. Stall cycles due to a stall in the Wr stage are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, interlock, or the Vector Processing Unit (VPU). This event counts every cycle where there is a stall or an interlock that is caused by a VPU instruction. Stall cycles due to a stall in the Wr stage are excluded",
+ "EventCode": "0xE6",
+ "EventName": "STALL_BACKEND_ILOCK_VPU",
+ "BriefDescription": "No operation issued due to the backend, interlock, or the Vector Processing Unit (VPU). This event counts every cycle where there is a stall or an interlock that is caused by a VPU instruction. Stall cycles due to a stall in the Wr stage are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load. This event counts every cycle where there is a stall in the Wr stage due to a load",
+ "EventCode": "0xE7",
+ "EventName": "STALL_BACKEND_LD",
+ "BriefDescription": "No operation issued due to the backend, load. This event counts every cycle where there is a stall in the Wr stage due to a load"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store. This event counts every cycle where there is a stall in the Wr stage due to a store",
+ "EventCode": "0xE8",
+ "EventName": "STALL_BACKEND_ST",
+ "BriefDescription": "No operation issued due to the backend, store. This event counts every cycle where there is a stall in the Wr stage due to a store"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, cache miss. This event counts every cycle where there is a stall in the Wr stage due to a load that is waiting on data. The event counts for stalls that are caused by missing the cache or where the data is Non-cacheable",
+ "EventCode": "0xE9",
+ "EventName": "STALL_BACKEND_LD_CACHE",
+ "BriefDescription": "No operation issued due to the backend, load, cache miss. This event counts every cycle where there is a stall in the Wr stage due to a load that is waiting on data. The event counts for stalls that are caused by missing the cache or where the data is Non-cacheable"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, TLB miss. This event counts every cycle where there is a stall in the Wr stage due to a load that misses in the L1 TLB",
+ "EventCode": "0xEA",
+ "EventName": "STALL_BACKEND_LD_TLB",
+ "BriefDescription": "No operation issued due to the backend, load, TLB miss. This event counts every cycle where there is a stall in the Wr stage due to a load that misses in the L1 TLB"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, Store Buffer (STB) full. This event counts every cycle where there is a stall in the Wr stage because of a store operation that is waiting due to the STB being full",
+ "EventCode": "0xEB",
+ "EventName": "STALL_BACKEND_ST_STB",
+ "BriefDescription": "No operation issued due to the backend, store, Store Buffer (STB) full. This event counts every cycle where there is a stall in the Wr stage because of a store operation that is waiting due to the STB being full"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, TLB miss. This event counts every cycle where there is a stall in the Wr stage because of a store operation that has missed in the L1 TLB",
+ "EventCode": "0xEC",
+ "EventName": "STALL_BACKEND_ST_TLB",
+ "BriefDescription": "No operation issued due to the backend, store, TLB miss. This event counts every cycle where there is a stall in the Wr stage because of a store operation that has missed in the L1 TLB"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, VPU hazard. This event counts every cycle where the core stalls due to contention for the VPU with the other core",
+ "EventCode": "0xED",
+ "EventName": "STALL_BACKEND_VPU_HAZARD",
+ "BriefDescription": "No operation issued due to the backend, VPU hazard. This event counts every cycle where the core stalls due to contention for the VPU with the other core"
+ },
+ {
+ "PublicDescription": "Issue slot not issued due to interlock. For each cycle, this event counts each dispatch slot that does not issue due to an interlock",
+ "EventCode": "0xEE",
+ "EventName": "STALL_SLOT_BACKEND_ILOCK",
+ "BriefDescription": "Issue slot not issued due to interlock. For each cycle, this event counts each dispatch slot that does not issue due to an interlock"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
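
The IMPDEF stall breakdown above (0xE1-0xEE) is only meaningful relative to a cycle count, so the events are best opened as one group, ensuring they are scheduled onto the PMU together. A minimal sketch under the same raw-event assumptions as before, using the architectural ARMv8 encodings CPU_CYCLES (0x11), STALL_FRONTEND (0x23) and STALL_BACKEND (0x24) with PERF_FORMAT_GROUP on the leader:

/* Read CPU_CYCLES, STALL_FRONTEND and STALL_BACKEND as one group so the
 * three counters cover exactly the same interval. The 0x11/0x23/0x24
 * encodings are the architectural ARMv8 PMU event numbers.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw(uint64_t config, int group_fd, uint64_t read_format)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = config;
        attr.disabled = group_fd == -1; /* only the leader starts disabled */
        attr.exclude_kernel = 1;
        attr.read_format = read_format;
        return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
        struct { uint64_t nr, val[3]; } buf;
        int leader;

        leader = open_raw(0x11, -1, PERF_FORMAT_GROUP); /* CPU_CYCLES */
        if (leader < 0) {
                perror("perf_event_open");
                return 1;
        }
        open_raw(0x23, leader, 0);                      /* STALL_FRONTEND */
        open_raw(0x24, leader, 0);                      /* STALL_BACKEND */

        ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
        /* ... workload under measurement runs here ... */
        ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

        if (read(leader, &buf, sizeof(buf)) == sizeof(buf) && buf.nr == 3)
                printf("cycles %llu, frontend stalls %llu, backend stalls %llu\n",
                       (unsigned long long)buf.val[0],
                       (unsigned long long)buf.val[1],
                       (unsigned long long)buf.val[2]);
        close(leader);
        return 0;
}

Substituting the finer-grained 0xE1-0xEE codes above into the group splits the same stall time by cause.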
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json
new file mode 100644
index 000000000000..d8b7b9f9e5fa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "PMU_OVFS"
+ },
+ {
+ "ArchStdEvent": "PMU_HOVFS"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json
new file mode 100644
index 000000000000..33672a8711d4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP"
+ },
+ {
+ "ArchStdEvent": "TRB_TRIG"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json
new file mode 100644
index 000000000000..8633d5db42a0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json
@@ -0,0 +1,59 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "PublicDescription": "Predicted conditional branch executed.This event counts when any branch which can be predicted by the conditional predictor is retired. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xC9",
+ "EventName": "BR_COND_PRED",
+ "BriefDescription": "Predicted conditional branch executed.This event counts when any branch which can be predicted by the conditional predictor is retired. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mis-predicted.This event counts when any indirect branch which can be predicted by the BTAC is retired, and has mispredicted for either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCA",
+ "EventName": "BR_INDIRECT_MIS_PRED",
+ "BriefDescription": "Indirect branch mis-predicted.This event counts when any indirect branch which can be predicted by the BTAC is retired, and has mispredicted for either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mis-predicted due to address mis-compare.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCB",
+ "EventName": "BR_INDIRECT_ADDR_MIS_PRED",
+ "BriefDescription": "Indirect branch mis-predicted due to address mis-compare.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Conditional branch mis-predicted.This event counts when any branch which can be predicted by the conditional predictor is retired, and has mis-predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches which correctly predicted the condition but mis-predicted on the address do not count this event",
+ "EventCode": "0xCC",
+ "EventName": "BR_COND_MIS_PRED",
+ "BriefDescription": "Conditional branch mis-predicted.This event counts when any branch which can be predicted by the conditional predictor is retired, and has mis-predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches which correctly predicted the condition but mis-predicted on the address do not count this event"
+ },
+ {
+ "PublicDescription": "Indirect branch with predicted address executed.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCD",
+ "EventName": "BR_INDIRECT_ADDR_PRED",
+ "BriefDescription": "Indirect branch with predicted address executed.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return with predicted address executed.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCE",
+ "EventName": "BR_RETURN_ADDR_PRED",
+ "BriefDescription": "Procedure return with predicted address executed.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return mis-predicted due to address mis-compare.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCF",
+ "EventName": "BR_RETURN_ADDR_MIS_PRED",
+ "BriefDescription": "Procedure return mis-predicted due to address mis-compare.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ }
+]
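
BR_COND_PRED (0xC9) and BR_COND_MIS_PRED (0xCC) above pair naturally into a conditional-branch misprediction ratio, since per their descriptions the mis-predicted branches are a subset of the retired conditional branches. A minimal sketch under the same raw-event assumptions as the earlier examples; error handling is trimmed for brevity:

/* Conditional-branch misprediction ratio from the IMPDEF pair above:
 * BR_COND_PRED (0xC9) counts all retired conditional branches seen by
 * the predictor, BR_COND_MIS_PRED (0xCC) the mis-predicted subset.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw(uint64_t config)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;      /* core PMU assumed, as before */
        attr.config = config;
        attr.exclude_kernel = 1;
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
        uint64_t cond = 0, mis = 0;
        int fd_cond = open_raw(0xC9);   /* BR_COND_PRED */
        int fd_mis = open_raw(0xCC);    /* BR_COND_MIS_PRED */

        /* counters run from open; workload under measurement goes here */

        read(fd_cond, &cond, sizeof(cond));
        read(fd_mis, &mis, sizeof(mis));
        if (cond)
                printf("conditional mispredict ratio: %.4f\n",
                       (double)mis / (double)cond);
        return 0;
}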
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json
new file mode 100644
index 000000000000..cd684c7ae026
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json
@@ -0,0 +1,188 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to both distinguish hardware vs software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented",
+ "EventCode": "0xC0",
+ "EventName": "L3D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to both distinguish hardware vs software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented"
+ },
+ {
+ "PublicDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented",
+ "EventCode": "0xC1",
+ "EventName": "L2D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented"
+ },
+ {
+ "PublicDescription": "Level 1 data cache refill due to prefetch. This event counts any linefills from the prefetcher which cause an allocation into the L1 D-cache",
+ "EventCode": "0xC2",
+ "EventName": "L1D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "Level 1 data cache refill due to prefetch. This event counts any linefills from the prefetcher which cause an allocation into the L1 D-cache"
+ },
+ {
+ "PublicDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache",
+ "EventCode": "0xC3",
+ "EventName": "L2D_WS_MODE",
+ "BriefDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache"
+ },
+ {
+ "PublicDescription": "Level 1 data cache entering write streaming mode.This event counts for each entry into write-streaming mode",
+ "EventCode": "0xC4",
+ "EventName": "L1D_WS_MODE_ENTRY",
+ "BriefDescription": "Level 1 data cache entering write streaming mode.This event counts for each entry into write-streaming mode"
+ },
+ {
+ "PublicDescription": "Level 1 data cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L1 D-cache",
+ "EventCode": "0xC5",
+ "EventName": "L1D_WS_MODE",
+ "BriefDescription": "Level 1 data cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L1 D-cache"
+ },
+ {
+ "PublicDescription": "Level 3 cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache",
+ "EventCode": "0xC7",
+ "EventName": "L3D_WS_MODE",
+ "BriefDescription": "Level 3 cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache access.This event does not count if the MMU is disabled",
+ "EventCode": "0xD0",
+ "EventName": "L2D_LLWALK_TLB",
+ "BriefDescription": "Level 2 TLB last-level walk cache access.This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache refill.This event does not count if the MMU is disabled",
+ "EventCode": "0xD1",
+ "EventName": "L2D_LLWALK_TLB_REFILL",
+ "BriefDescription": "Level 2 TLB last-level walk cache refill.This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache access.This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled",
+ "EventCode": "0xD2",
+ "EventName": "L2D_L2WALK_TLB",
+ "BriefDescription": "Level 2 TLB level-2 walk cache access.This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache refill.This event does not count if the MMU is disabled",
+ "EventCode": "0xD3",
+ "EventName": "L2D_L2WALK_TLB_REFILL",
+ "BriefDescription": "Level 2 TLB level-2 walk cache refill.This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD4",
+ "EventName": "L2D_S2_TLB",
+ "BriefDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD5",
+ "EventName": "L2D_S2_TLB_REFILL",
+ "BriefDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Level 2 cache stash dropped.This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request",
+ "EventCode": "0xD6",
+ "EventName": "L2D_CACHE_STASH_DROPPED",
+ "BriefDescription": "Level 2 cache stash dropped.This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json
new file mode 100644
index 000000000000..99f1ab987709
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "PublicDescription": "Predecode error",
+ "EventCode": "0xC6",
+ "EventName": "PREDECODE_ERROR",
+ "BriefDescription": "Predecode error"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json
new file mode 100644
index 000000000000..e762fab9e2d8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json
@@ -0,0 +1,65 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json
new file mode 100644
index 000000000000..d9229173d189
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json
new file mode 100644
index 000000000000..6c6b5869cf70
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, cache miss.This event counts every cycle the DPU IQ is empty and there is an instruction cache miss being processed",
+ "EventCode": "0xE1",
+ "EventName": "STALL_FRONTEND_CACHE",
+ "BriefDescription": "No operation issued due to the frontend, cache miss.This event counts every cycle the DPU IQ is empty and there is an instruction cache miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, TLB miss.This event counts every cycle the DPU IQ is empty and there is an instruction L1 TLB miss being processed",
+ "EventCode": "0xE2",
+ "EventName": "STALL_FRONTEND_TLB",
+ "BriefDescription": "No operation issued due to the frontend, TLB miss.This event counts every cycle the DPU IQ is empty and there is an instruction L1 TLB miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, pre-decode error.This event counts every cycle the DPU IQ is empty and there is a pre-decode error being processed",
+ "EventCode": "0xE3",
+ "EventName": "STALL_FRONTEND_PDERR",
+ "BriefDescription": "No operation issued due to the frontend, pre-decode error.This event counts every cycle the DPU IQ is empty and there is a pre-decode error being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend interlock.This event counts every cycle that issue is stalled and there is an interlock. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded",
+ "EventCode": "0xE4",
+ "EventName": "STALL_BACKEND_ILOCK",
+ "BriefDescription": "No operation issued due to the backend interlock.This event counts every cycle that issue is stalled and there is an interlock. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, interlock, AGU.This event counts every cycle that issue is stalled and there is an interlock that is due to a load/store instruction waiting for data to calculate the address in the AGU. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded",
+ "EventCode": "0xE5",
+ "EventName": "STALL_BACKEND_ILOCK_AGU",
+ "BriefDescription": "No operation issued due to the backend, interlock, AGU.This event counts every cycle that issue is stalled and there is an interlock that is due to a load/store instruction waiting for data to calculate the address in the AGU. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, interlock, FPU.This event counts every cycle that issue is stalled and there is an interlock that is due to an FPU/NEON instruction. Stall cycles due to a stall in the Wr stage (typically awaiting load data) are excluded",
+ "EventCode": "0xE6",
+ "EventName": "STALL_BACKEND_ILOCK_FPU",
+ "BriefDescription": "No operation issued due to the backend, interlock, FPU.This event counts every cycle that issue is stalled and there is an interlock that is due to an FPU/NEON instruction. Stall cycles due to a stall in the Wr stage (typically awaiting load data) are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load.This event counts every cycle there is a stall in the Wr stage due to a load",
+ "EventCode": "0xE7",
+ "EventName": "STALL_BACKEND_LD",
+ "BriefDescription": "No operation issued due to the backend, load.This event counts every cycle there is a stall in the Wr stage due to a load"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store.This event counts every cycle there is a stall in the Wr stage due to a store",
+ "EventCode": "0xE8",
+ "EventName": "STALL_BACKEND_ST",
+ "BriefDescription": "No operation issued due to the backend, store.This event counts every cycle there is a stall in the Wr stage due to a store"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, cache miss.This event counts every cycle there is a stall in the Wr stage due to a load which is waiting on data (due to missing the cache or being non-cacheable)",
+ "EventCode": "0xE9",
+ "EventName": "STALL_BACKEND_LD_CACHE",
+ "BriefDescription": "No operation issued due to the backend, load, cache miss.This event counts every cycle there is a stall in the Wr stage due to a load which is waiting on data (due to missing the cache or being non-cacheable)"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a load which has missed in the L1 TLB",
+ "EventCode": "0xEA",
+ "EventName": "STALL_BACKEND_LD_TLB",
+ "BriefDescription": "No operation issued due to the backend, load, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a load which has missed in the L1 TLB"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, STB full.This event counts every cycle there is a stall in the Wr stage due to a store which is waiting due to the STB being full",
+ "EventCode": "0xEB",
+ "EventName": "STALL_BACKEND_ST_STB",
+ "BriefDescription": "No operation issued due to the backend, store, STB full.This event counts every cycle there is a stall in the Wr stage due to a store which is waiting due to the STB being full"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a store which has missed in the L1 TLB",
+ "EventCode": "0xEC",
+ "EventName": "STALL_BACKEND_ST_TLB",
+ "BriefDescription": "No operation issued due to the backend, store, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a store which has missed in the L1 TLB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json
new file mode 100644
index 000000000000..31505994c06c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NORMAL"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_PERIPH"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json
new file mode 100644
index 000000000000..1bd59e7d982b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
deleted file mode 100644
index 543c7692677a..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
+++ /dev/null
@@ -1,179 +0,0 @@
-[
- {
- "ArchStdEvent": "L1D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L1D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L2D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_RD"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_WR"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_SHARED"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_NORMAL"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_PERIPH"
- },
- {
- "ArchStdEvent": "MEM_ACCESS_RD"
- },
- {
- "ArchStdEvent": "MEM_ACCESS_WR"
- },
- {
- "ArchStdEvent": "UNALIGNED_LD_SPEC"
- },
- {
- "ArchStdEvent": "UNALIGNED_ST_SPEC"
- },
- {
- "ArchStdEvent": "UNALIGNED_LDST_SPEC"
- },
- {
- "ArchStdEvent": "LDREX_SPEC"
- },
- {
- "ArchStdEvent": "STREX_PASS_SPEC"
- },
- {
- "ArchStdEvent": "STREX_FAIL_SPEC"
- },
- {
- "ArchStdEvent": "LD_SPEC"
- },
- {
- "ArchStdEvent": "ST_SPEC"
- },
- {
- "ArchStdEvent": "LDST_SPEC"
- },
- {
- "ArchStdEvent": "DP_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SPEC"
- },
- {
- "ArchStdEvent": "VFP_SPEC"
- },
- {
- "ArchStdEvent": "PC_WRITE_SPEC"
- },
- {
- "ArchStdEvent": "CRYPTO_SPEC"
- },
- {
- "ArchStdEvent": "BR_IMMED_SPEC"
- },
- {
- "ArchStdEvent": "BR_RETURN_SPEC"
- },
- {
- "ArchStdEvent": "BR_INDIRECT_SPEC"
- },
- {
- "ArchStdEvent": "ISB_SPEC"
- },
- {
- "ArchStdEvent": "DSB_SPEC"
- },
- {
- "ArchStdEvent": "DMB_SPEC"
- },
- {
- "ArchStdEvent": "EXC_UNDEF"
- },
- {
- "ArchStdEvent": "EXC_SVC"
- },
- {
- "ArchStdEvent": "EXC_PABORT"
- },
- {
- "ArchStdEvent": "EXC_DABORT"
- },
- {
- "ArchStdEvent": "EXC_IRQ"
- },
- {
- "ArchStdEvent": "EXC_FIQ"
- },
- {
- "ArchStdEvent": "EXC_SMC"
- },
- {
- "ArchStdEvent": "EXC_HVC"
- },
- {
- "ArchStdEvent": "EXC_TRAP_PABORT"
- },
- {
- "ArchStdEvent": "EXC_TRAP_DABORT"
- },
- {
- "ArchStdEvent": "EXC_TRAP_OTHER"
- },
- {
- "ArchStdEvent": "EXC_TRAP_IRQ"
- },
- {
- "ArchStdEvent": "EXC_TRAP_FIQ"
- },
- {
- "ArchStdEvent": "RC_LD_SPEC"
- },
- {
- "ArchStdEvent": "RC_ST_SPEC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json
new file mode 100644
index 000000000000..e42486d406b3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json
@@ -0,0 +1,68 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json
new file mode 100644
index 000000000000..e3d08f1f7c92
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/cache.json
new file mode 100644
index 000000000000..118c5cb0674b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/cache.json
@@ -0,0 +1,236 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "Merge in the store buffer",
+ "EventCode": "0xC0",
+ "EventName": "STB_STALL",
+ "BriefDescription": "Merge in the store buffer"
+ },
+ {
+ "PublicDescription": "Level 1 data cache refill started due to prefetch. Counts any linefills from the prefetcher which cause an allocation into the L1 D-cache",
+ "EventCode": "0xC3",
+ "EventName": "L1D_PREF_LINE_FILL",
+ "BriefDescription": "Level 1 data cache refill started due to prefetch. Counts any linefills from the prefetcher which cause an allocation into the L1 D-cache"
+ },
+ {
+ "PublicDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3_PREF_LINE_FILL. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented",
+ "EventCode": "0xC4",
+ "EventName": "L2D_PREF_LINE_FILL",
+ "BriefDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3_PREF_LINE_FILL. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented"
+ },
+ {
+ "PublicDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to distinguish between both hardware and software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented",
+ "EventCode": "0xC5",
+ "EventName": "L3_PREF_LINE_FILL",
+ "BriefDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to distinguish between both hardware and software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented"
+ },
+ {
+ "PublicDescription": "L1D entering write stream mode",
+ "EventCode": "0xC6",
+ "EventName": "L1D_WS_MODE_ENTER",
+ "BriefDescription": "L1D entering write stream mode"
+ },
+ {
+ "PublicDescription": "L1D is in write stream mode",
+ "EventCode": "0xC7",
+ "EventName": "L1D_WS_MODE",
+ "BriefDescription": "L1D is in write stream mode"
+ },
+ {
+ "PublicDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache",
+ "EventCode": "0xC8",
+ "EventName": "L2D_WS_MODE",
+ "BriefDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache"
+ },
+ {
+ "PublicDescription": "Level 3 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache",
+ "EventCode": "0xC9",
+ "EventName": "L3D_WS_MODE",
+ "BriefDescription": "Level 3 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache access. This event does not count if the MMU is disabled",
+ "EventCode": "0xCA",
+ "EventName": "TLB_L2TLB_LLWALK_ACCESS",
+ "BriefDescription": "Level 2 TLB last-level walk cache access. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache refill. This event does not count if the MMU is disabled",
+ "EventCode": "0xCB",
+ "EventName": "TLB_L2TLB_LLWALK_REFILL",
+ "BriefDescription": "Level 2 TLB last-level walk cache refill. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache access. This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled",
+ "EventCode": "0xCC",
+ "EventName": "TLB_L2TLB_L2WALK_ACCESS",
+ "BriefDescription": "Level 2 TLB level-2 walk cache access. This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache refill. This event does not count if the MMU is disabled",
+ "EventCode": "0xCD",
+ "EventName": "TLB_L2TLB_L2WALK_REFILL",
+ "BriefDescription": "Level 2 TLB level-2 walk cache refill. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xCE",
+ "EventName": "TLB_L2TLB_S2_ACCESS",
+ "BriefDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xCF",
+ "EventName": "TLB_L2TLB_S2_REFILL",
+ "BriefDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Unattributable Level 1 data cache write-back. This event occurs when a requestor outside the PE makes a coherency request that results in writeback",
+ "EventCode": "0xF0",
+ "EventName": "L2_L1D_CACHE_WB_UNATT",
+ "BriefDescription": "Unattributable Level 1 data cache write-back. This event occurs when a requestor outside the PE makes a coherency request that results in writeback"
+ },
+ {
+ "PublicDescription": "Unattributable Level 2 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache access",
+ "EventCode": "0xF1",
+ "EventName": "L2_L2D_CACHE_UNATT",
+ "BriefDescription": "Unattributable Level 2 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 2 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache read access",
+ "EventCode": "0xF2",
+ "EventName": "L2_L2D_CACHE_RD_UNATT",
+ "BriefDescription": "Unattributable Level 2 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache read access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access",
+ "EventCode": "0xF3",
+ "EventName": "L2_L3D_CACHE_UNATT",
+ "BriefDescription": "Unattributable Level 3 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access",
+ "EventCode": "0xF4",
+ "EventName": "L2_L3D_CACHE_RD_UNATT",
+ "BriefDescription": "Unattributable Level 3 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data or unified cache allocation without refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache allocate without refill",
+ "EventCode": "0xF5",
+ "EventName": "L2_L3D_CACHE_ALLOC_UNATT",
+ "BriefDescription": "Unattributable Level 3 data or unified cache allocation without refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache allocate without refill"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data or unified cache refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache refill",
+ "EventCode": "0xF6",
+ "EventName": "L2_L3D_CACHE_REFILL_UNATT",
+ "BriefDescription": "Unattributable Level 3 data or unified cache refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache refill"
+ },
+ {
+ "PublicDescription": "Level 2 cache stash dropped. This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request. L2 and L3 cache events (L2D_CACHE*, L3D_CACHE*) The behavior of these events depends on the configuration of the core. If the private L2 cache is present, the L2D_CACHE* events count the activity in the private L2 cache, and the L3D_CACHE* events count the activity in the DSU L3 cache (if present). If the private L2 cache is not present but the DSU L3 cache is present, the L2D_CACHE* events count activity in the DSU L3 cache and the L3D_CACHE* events do not count. The L2D_CACHE_WB, L2D_CACHE_WR and L2D_CACHE_REFILL_WR events do not count in this configuration. If neither the private L2 cache nor the DSU L3 cache are present, neither the L2D_CACHE* or L3D_CACHE* events will count",
+ "EventCode": "0xF7",
+ "EventName": "L2D_CACHE_STASH_DROPPED",
+ "BriefDescription": "Level 2 cache stash dropped. This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request. L2 and L3 cache events (L2D_CACHE*, L3D_CACHE*) The behavior of these events depends on the configuration of the core. If the private L2 cache is present, the L2D_CACHE* events count the activity in the private L2 cache, and the L3D_CACHE* events count the activity in the DSU L3 cache (if present). If the private L2 cache is not present but the DSU L3 cache is present, the L2D_CACHE* events count activity in the DSU L3 cache and the L3D_CACHE* events do not count. The L2D_CACHE_WB, L2D_CACHE_WR and L2D_CACHE_REFILL_WR events do not count in this configuration. If neither the private L2 cache nor the DSU L3 cache are present, neither the L2D_CACHE* or L3D_CACHE* events will count"
+ }
+]
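For context, the EventCode values in these files are the raw Armv8 PMU event numbers, so an event can be counted either by its alias name once perf has this JSON built in (e.g. "perf stat -e tlb_l2tlb_l2walk_refill") or directly through perf_event_open(2) with PERF_TYPE_RAW. A minimal sketch of the raw-code path, assuming it runs on a Cortex-A65 where 0xCD is TLB_L2TLB_L2WALK_REFILL as defined above:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = 0xCD;	/* TLB_L2TLB_L2WALK_REFILL (cortex-a65/cache.json) */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* Count for the calling thread, on whichever CPU it runs. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement runs here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("level-2 walk cache refills: %lld\n", count);
	close(fd);
	return 0;
}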
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/dpu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/dpu.json
new file mode 100644
index 000000000000..b8e402a91bdd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/dpu.json
@@ -0,0 +1,32 @@
+[
+ {
+ "PublicDescription": "Instruction retired, indirect branch, mispredicted",
+ "EventCode": "0xE9",
+ "EventName": "DPU_BR_IND_MIS",
+ "BriefDescription": "Instruction retired, indirect branch, mispredicted"
+ },
+ {
+ "PublicDescription": "Instruction retired, conditional branch, mispredicted",
+ "EventCode": "0xEA",
+ "EventName": "DPU_BR_COND_MIS",
+ "BriefDescription": "Instruction retired, conditional branch, mispredicted"
+ },
+ {
+ "PublicDescription": "Memory error (any type) from IFU",
+ "EventCode": "0xEB",
+ "EventName": "DPU_MEM_ERR_IFU",
+ "BriefDescription": "Memory error (any type) from IFU"
+ },
+ {
+ "PublicDescription": "Memory error (any type) from DCU",
+ "EventCode": "0xEC",
+ "EventName": "DPU_MEM_ERR_DCU",
+ "BriefDescription": "Memory error (any type) from DCU"
+ },
+ {
+ "PublicDescription": "Memory error (any type) from TLB",
+ "EventCode": "0xED",
+ "EventName": "DPU_MEM_ERR_TLB",
+ "BriefDescription": "Memory error (any type) from TLB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/ifu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/ifu.json
new file mode 100644
index 000000000000..13178c5dca14
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/ifu.json
@@ -0,0 +1,122 @@
+[
+ {
+ "PublicDescription": "I-Cache miss on an access from the prefetch block",
+ "EventCode": "0xD0",
+ "EventName": "IFU_IC_MISS_WAIT",
+ "BriefDescription": "I-Cache miss on an access from the prefetch block"
+ },
+ {
+ "PublicDescription": "Counts the cycles spent on a request for Level 2 TLB lookup after a Level 1l ITLB miss",
+ "EventCode": "0xD1",
+ "EventName": "IFU_IUTLB_MISS_WAIT",
+ "BriefDescription": "Counts the cycles spent on a request for Level 2 TLB lookup after a Level 1l ITLB miss"
+ },
+ {
+ "PublicDescription": "Micro-predictor conditional/direction mispredict, with respect to. if3/if4 predictor",
+ "EventCode": "0xD2",
+ "EventName": "IFU_MICRO_COND_MISPRED",
+ "BriefDescription": "Micro-predictor conditional/direction mispredict, with respect to. if3/if4 predictor"
+ },
+ {
+ "PublicDescription": "Micro-predictor address mispredict, with respect to if3/if4 predictor",
+ "EventCode": "0xD3",
+ "EventName": "IFU_MICRO_CADDR_MISPRED",
+ "BriefDescription": "Micro-predictor address mispredict, with respect to if3/if4 predictor"
+ },
+ {
+ "PublicDescription": "Micro-predictor hit with immediate redirect",
+ "EventCode": "0xD4",
+ "EventName": "IFU_MICRO_HIT",
+ "BriefDescription": "Micro-predictor hit with immediate redirect"
+ },
+ {
+ "PublicDescription": "Micro-predictor negative cache hit",
+ "EventCode": "0xD6",
+ "EventName": "IFU_MICRO_NEG_HIT",
+ "BriefDescription": "Micro-predictor negative cache hit"
+ },
+ {
+ "PublicDescription": "Micro-predictor correction",
+ "EventCode": "0xD7",
+ "EventName": "IFU_MICRO_CORRECTION",
+ "BriefDescription": "Micro-predictor correction"
+ },
+ {
+ "PublicDescription": "A 2nd instruction could have been pushed but was not because it was nonsequential",
+ "EventCode": "0xD8",
+ "EventName": "IFU_MICRO_NO_INSTR1",
+ "BriefDescription": "A 2nd instruction could have been pushed but was not because it was nonsequential"
+ },
+ {
+ "PublicDescription": "Micro-predictor miss",
+ "EventCode": "0xD9",
+ "EventName": "IFU_MICRO_NO_PRED",
+ "BriefDescription": "Micro-predictor miss"
+ },
+ {
+ "PublicDescription": "Thread flushed due to TLB miss",
+ "EventCode": "0xDA",
+ "EventName": "IFU_FLUSHED_TLB_MISS",
+ "BriefDescription": "Thread flushed due to TLB miss"
+ },
+ {
+ "PublicDescription": "Thread flushed due to reasons other than TLB miss",
+ "EventCode": "0xDB",
+ "EventName": "IFU_FLUSHED_EXCL_TLB_MISS",
+ "BriefDescription": "Thread flushed due to reasons other than TLB miss"
+ },
+ {
+ "PublicDescription": "This thread and the other thread both ready for scheduling in if0",
+ "EventCode": "0xDC",
+ "EventName": "IFU_ALL_THRDS_RDY",
+ "BriefDescription": "This thread and the other thread both ready for scheduling in if0"
+ },
+ {
+ "PublicDescription": "This thread was arbitrated when the other thread was also ready for scheduling",
+ "EventCode": "0xDD",
+ "EventName": "IFU_WIN_ARB_OTHER_RDY",
+ "BriefDescription": "This thread was arbitrated when the other thread was also ready for scheduling"
+ },
+ {
+ "PublicDescription": "This thread was arbitrated when the other thread was also active, but not necessarily ready. For example, waiting for I-Cache or TLB",
+ "EventCode": "0xDE",
+ "EventName": "IFU_WIN_ARB_OTHER_ACT",
+ "BriefDescription": "This thread was arbitrated when the other thread was also active, but not necessarily ready. For example, waiting for I-Cache or TLB"
+ },
+ {
+ "PublicDescription": "This thread was not arbitrated because it was not ready for scheduling. For example, due to a cache miss or TLB miss",
+ "EventCode": "0xDF",
+ "EventName": "IFU_NOT_RDY_FOR_ARB",
+ "BriefDescription": "This thread was not arbitrated because it was not ready for scheduling. For example, due to a cache miss or TLB miss"
+ },
+ {
+ "PublicDescription": "The thread moved from an active state to an inactive state (long-term sleep state, causing deallocation of some resources)",
+ "EventCode": "0xE0",
+ "EventName": "IFU_GOTO_IDLE",
+ "BriefDescription": "The thread moved from an active state to an inactive state (long-term sleep state, causing deallocation of some resources)"
+ },
+ {
+ "PublicDescription": "I-Cache lookup under miss from other thread",
+ "EventCode": "0xE1",
+ "EventName": "IFU_IC_LOOKUP_UNDER_MISS",
+ "BriefDescription": "I-Cache lookup under miss from other thread"
+ },
+ {
+ "PublicDescription": "I-Cache miss under miss from other thread",
+ "EventCode": "0xE2",
+ "EventName": "IFU_IC_MISS_UNDER_MISS",
+ "BriefDescription": "I-Cache miss under miss from other thread"
+ },
+ {
+ "PublicDescription": "This thread pushed an instruction into the IQ",
+ "EventCode": "0xE3",
+ "EventName": "IFU_INSTR_PUSHED",
+ "BriefDescription": "This thread pushed an instruction into the IQ"
+ },
+ {
+ "PublicDescription": "I-Cache Speculative line fill",
+ "EventCode": "0xE4",
+ "EventName": "IFU_IC_LF_SP",
+ "BriefDescription": "I-Cache Speculative line fill"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/instruction.json
new file mode 100644
index 000000000000..2e0d60779dce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/instruction.json
@@ -0,0 +1,71 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "PublicDescription": "Instruction retired, conditional branch",
+ "EventCode": "0xE8",
+ "EventName": "DPU_BR_COND_RETIRED",
+ "BriefDescription": "Instruction retired, conditional branch"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/memory.json
new file mode 100644
index 000000000000..18d527f7fad4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/memory.json
@@ -0,0 +1,35 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ },
+ {
+ "PublicDescription": "External memory request",
+ "EventCode": "0xC1",
+ "EventName": "BIU_EXT_MEM_REQ",
+ "BriefDescription": "External memory request"
+ },
+ {
+ "PublicDescription": "External memory request to non-cacheable memory",
+ "EventCode": "0xC2",
+ "EventName": "BIU_EXT_MEM_REQ_NC",
+ "BriefDescription": "External memory request to non-cacheable memory"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/pipeline.json
new file mode 100644
index 000000000000..eeac798d403a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65/pipeline.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json
new file mode 100644
index 000000000000..964f47c6b099
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json
@@ -0,0 +1,134 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json
new file mode 100644
index 000000000000..7b2b21ac150f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json
@@ -0,0 +1,41 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json
new file mode 100644
index 000000000000..3116135c59e2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json
new file mode 100644
index 000000000000..103bb2535775
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NORMAL"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_PERIPH"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json
new file mode 100644
index 000000000000..b9b3d3fb07b2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json
@@ -0,0 +1,107 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Tag RAM",
+ "EventCode": "0xC2",
+ "EventName": "I_TAG_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Tag RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Data RAM",
+ "EventCode": "0xC3",
+ "EventName": "I_DATA_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Data RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction BTAC RAM",
+ "EventCode": "0xC4",
+ "EventName": "I_BTAC_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction BTAC RAM"
+ },
+ {
+ "PublicDescription": "Level 1 PLD TLB refill",
+ "EventCode": "0xE7",
+ "EventName": "PLD_UTLB_REFILL",
+ "BriefDescription": "Level 1 PLD TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 CP15 TLB refill",
+ "EventCode": "0xE8",
+ "EventName": "CP15_UTLB_REFILL",
+ "BriefDescription": "Level 1 CP15 TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 TLB flush",
+ "EventCode": "0xE9",
+ "EventName": "UTLB_FLUSH",
+ "BriefDescription": "Level 1 TLB flush"
+ },
+ {
+ "PublicDescription": "Level 2 TLB access",
+ "EventCode": "0xEA",
+ "EventName": "TLB_ACCESS",
+ "BriefDescription": "Level 2 TLB access"
+ },
+ {
+ "PublicDescription": "Level 2 TLB miss",
+ "EventCode": "0xEB",
+ "EventName": "TLB_MISS",
+ "BriefDescription": "Level 2 TLB miss"
+ },
+ {
+ "PublicDescription": "Data cache hit in itself due to VIPT aliasing",
+ "EventCode": "0xEC",
+ "EventName": "DCACHE_SELF_HIT_VIPT",
+ "BriefDescription": "Data cache hit in itself due to VIPT aliasing"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json
new file mode 100644
index 000000000000..fce852e82369
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json
@@ -0,0 +1,14 @@
+[
+ {
+ "PublicDescription": "ETM trace unit output 0",
+ "EventCode": "0xDE",
+ "EventName": "ETM_EXT_OUT0",
+ "BriefDescription": "ETM trace unit output 0"
+ },
+ {
+ "PublicDescription": "ETM trace unit output 1",
+ "EventCode": "0xDF",
+ "EventName": "ETM_EXT_OUT1",
+ "BriefDescription": "ETM trace unit output 1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json
new file mode 100644
index 000000000000..b77f1228873d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "PublicDescription": "Number of Traps to hypervisor",
+ "EventCode": "0xDC",
+ "EventName": "EXC_TRAP_HYP",
+ "BriefDescription": "Number of Traps to hypervisor"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json
new file mode 100644
index 000000000000..91a7863ddc9a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json
@@ -0,0 +1,65 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json
new file mode 100644
index 000000000000..34e9cab7f0b9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json
new file mode 100644
index 000000000000..b85c9cc81f23
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json
@@ -0,0 +1,44 @@
+[
+ {
+ "PublicDescription": "Duration of a translation table walk handled by the MMU",
+ "EventCode": "0xE0",
+ "EventName": "MMU_PTW",
+ "BriefDescription": "Duration of a translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 1 translation table walk handled by the MMU",
+ "EventCode": "0xE1",
+ "EventName": "MMU_PTW_ST1",
+ "BriefDescription": "Duration of a Stage 1 translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 2 translation table walk handled by the MMU",
+ "EventCode": "0xE2",
+ "EventName": "MMU_PTW_ST2",
+ "BriefDescription": "Duration of a Stage 2 translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the LSU",
+ "EventCode": "0xE3",
+ "EventName": "MMU_PTW_LSU",
+ "BriefDescription": "Duration of a translation table walk requested by the LSU"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the Instruction Side",
+ "EventCode": "0xE4",
+ "EventName": "MMU_PTW_ISIDE",
+ "BriefDescription": "Duration of a translation table walk requested by the Instruction Side"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request",
+ "EventCode": "0xE5",
+ "EventName": "MMU_PTW_PLD",
+ "BriefDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by a CP15 operation (maintenance by MVA and VA to PA operations)",
+ "EventCode": "0xE6",
+ "EventName": "MMU_PTW_CP15",
+ "BriefDescription": "Duration of a translation table walk requested by a CP15 operation (maintenance by MVA and VA to PA operations)"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json
new file mode 100644
index 000000000000..1730969e49f7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json
@@ -0,0 +1,38 @@
+[
+ {
+ "PublicDescription": "A linefill caused an instruction side stall",
+ "EventCode": "0xC0",
+ "EventName": "LF_STALL",
+ "BriefDescription": "A linefill caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "A translation table walk caused an instruction side stall",
+ "EventCode": "0xC1",
+ "EventName": "PTW_STALL",
+ "BriefDescription": "A translation table walk caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the Load-Store Unit are busy",
+ "EventCode": "0xD3",
+ "EventName": "D_LSU_SLOT_FULL",
+ "BriefDescription": "Duration for which all slots in the Load-Store Unit are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the load-store issue queue are busy",
+ "EventCode": "0xD8",
+ "EventName": "LS_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the load-store issue queue are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the data processing issue queue are busy",
+ "EventCode": "0xD9",
+ "EventName": "DP_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the data processing issue queue are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the Data Engine issue queue are busy",
+ "EventCode": "0xDA",
+ "EventName": "DE_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the Data Engine issue queue are busy"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json
new file mode 100644
index 000000000000..7efa09800a51
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json
@@ -0,0 +1,164 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Tag RAM",
+ "EventCode": "0xC2",
+ "EventName": "I_TAG_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Tag RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Data RAM",
+ "EventCode": "0xC3",
+ "EventName": "I_DATA_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Data RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction BTAC RAM",
+ "EventCode": "0xC4",
+ "EventName": "I_BTAC_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction BTAC RAM"
+ },
+ {
+ "PublicDescription": "Level 1 PLD TLB refill",
+ "EventCode": "0xE7",
+ "EventName": "L1PLD_TLB_REFILL",
+ "BriefDescription": "Level 1 PLD TLB refill"
+ },
+ {
+ "PublicDescription": "Level 2 preload and MMU prefetcher TLB access. This event only counts software and hardware prefetches at Level 2",
+ "EventCode": "0xE8",
+ "EventName": "L2PLD_TLB",
+ "BriefDescription": "Level 2 preload and MMU prefetcher TLB access. This event only counts software and hardware prefetches at Level 2"
+ },
+ {
+ "PublicDescription": "Level 1 TLB flush",
+ "EventCode": "0xE9",
+ "EventName": "UTLB_FLUSH",
+ "BriefDescription": "Level 1 TLB flush"
+ },
+ {
+ "PublicDescription": "Level 2 TLB access",
+ "EventCode": "0xEA",
+ "EventName": "TLB_ACCESS",
+ "BriefDescription": "Level 2 TLB access"
+ },
+ {
+ "PublicDescription": "Level 1 preload TLB access. This event only counts software and hardware prefetches at Level 1. This event counts all accesses to the preload data micro TLB, that is L1 prefetcher and preload instructions. This event does not take into account whether the MMU is enabled or not",
+ "EventCode": "0xEB",
+ "EventName": "L1PLD_TLB",
+ "BriefDescription": "Level 1 preload TLB access. This event only counts software and hardware prefetches at Level 1. This event counts all accesses to the preload data micro TLB, that is L1 prefetcher and preload instructions. This event does not take into account whether the MMU is enabled or not"
+ },
+ {
+ "PublicDescription": "Prefetch access to unified TLB that caused a page table walk. This event counts software and hardware prefetches",
+ "EventCode": "0xEC",
+ "EventName": "PLDTLB_WALK",
+ "BriefDescription": "Prefetch access to unified TLB that caused a page table walk. This event counts software and hardware prefetches"
+ }
+]
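Most entries above are ArchStdEvent references, resolved at build time against the common armv8 recommended-event list, which also supplies the architectural event numbers (L1D_CACHE_REFILL is 0x03 and L1D_CACHE is 0x04 in the Armv8 PMU). A sketch, under those assumptions, that reads both counters in one event group to derive an L1D miss ratio; for brevity the counters simply run from open(), where a real tool would disable and enable them around the workload:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw(unsigned long long code, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = code;
	attr.read_format = PERF_FORMAT_GROUP;
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
	/* Armv8 architectural event numbers behind the ArchStdEvents. */
	int leader = open_raw(0x04, -1);	/* L1D_CACHE: all L1D accesses */
	int child = open_raw(0x03, leader);	/* L1D_CACHE_REFILL: misses */
	struct { long long nr, vals[2]; } buf;	/* PERF_FORMAT_GROUP layout */

	if (leader < 0 || child < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... workload under measurement runs here ... */
	if (read(leader, &buf, sizeof(buf)) == sizeof(buf) && buf.vals[0])
		printf("L1D miss ratio: %.2f%%\n",
		       100.0 * buf.vals[1] / buf.vals[0]);
	close(child);
	close(leader);
	return 0;
}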
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json
new file mode 100644
index 000000000000..fce852e82369
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json
@@ -0,0 +1,14 @@
+[
+ {
+ "PublicDescription": "ETM trace unit output 0",
+ "EventCode": "0xDE",
+ "EventName": "ETM_EXT_OUT0",
+ "BriefDescription": "ETM trace unit output 0"
+ },
+ {
+ "PublicDescription": "ETM trace unit output 1",
+ "EventCode": "0xDF",
+ "EventName": "ETM_EXT_OUT1",
+ "BriefDescription": "ETM trace unit output 1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json
new file mode 100644
index 000000000000..5b04d01de703
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "PublicDescription": "Number of traps to hypervisor. This event counts the number of exception traps taken to EL2, excluding HVC instructions. This event is set every time that an exception is executed because of a decoded trap to the hypervisor. CCFAIL exceptions and traps caused by HVC instructions are excluded. This event is not counted when it is accessible from Non-secure EL0 or EL1",
+ "EventCode": "0xDC",
+ "EventName": "EXC_TRAP_HYP",
+ "BriefDescription": "Number of traps to hypervisor. This event counts the number of exception traps taken to EL2, excluding HVC instructions. This event is set every time that an exception is executed because of a decoded trap to the hypervisor. CCFAIL exceptions and traps caused by HVC instructions are excluded. This event is not counted when it is accessible from Non-secure EL0 or EL1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json
new file mode 100644
index 000000000000..930ce8a259f3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json
@@ -0,0 +1,74 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json
new file mode 100644
index 000000000000..929fc545470f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json
new file mode 100644
index 000000000000..0e63e68bc8cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json
@@ -0,0 +1,44 @@
+[
+ {
+ "PublicDescription": "Duration of a translation table walk handled by the MMU",
+ "EventCode": "0xE0",
+ "EventName": "MMU_PTW",
+ "BriefDescription": "Duration of a translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 1 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1",
+ "EventCode": "0xE1",
+ "EventName": "MMU_PTW_ST1",
+ "BriefDescription": "Duration of a Stage 1 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 2 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1",
+ "EventCode": "0xE2",
+ "EventName": "MMU_PTW_ST2",
+ "BriefDescription": "Duration of a Stage 2 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the LSU",
+ "EventCode": "0xE3",
+ "EventName": "MMU_PTW_LSU",
+ "BriefDescription": "Duration of a translation table walk requested by the LSU"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the instruction side",
+ "EventCode": "0xE4",
+ "EventName": "MMU_PTW_ISIDE",
+ "BriefDescription": "Duration of a translation table walk requested by the instruction side"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request",
+ "EventCode": "0xE5",
+ "EventName": "MMU_PTW_PLD",
+ "BriefDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by an address translation operation",
+ "EventCode": "0xE6",
+ "EventName": "MMU_PTW_CP15",
+ "BriefDescription": "Duration of a translation table walk requested by an address translation operation"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json
new file mode 100644
index 000000000000..0f8f50823cf1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json
@@ -0,0 +1,44 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "PublicDescription": "A linefill caused an instruction side stall",
+ "EventCode": "0xC0",
+ "EventName": "LF_STALL",
+ "BriefDescription": "A linefill caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "A translation table walk caused an instruction side stall",
+ "EventCode": "0xC1",
+ "EventName": "PTW_STALL",
+ "BriefDescription": "A translation table walk caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the Load-Store Unit (LSU) are busy",
+ "EventCode": "0xD3",
+ "EventName": "D_LSU_SLOT_FULL",
+ "BriefDescription": "Duration for which all slots in the Load-Store Unit (LSU) are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the load-store issue queue are busy. This event counts the cycles where all slots in the LS IQs are full with micro-operations waiting for issuing, and the dispatch stage is not empty",
+ "EventCode": "0xD8",
+ "EventName": "LS_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the load-store issue queue are busy. This event counts the cycles where all slots in the LS IQs are full with micro-operations waiting for issuing, and the dispatch stage is not empty"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the data processing issue queue are busy. This event counts the cycles where all slots in the DP0 and DP1 IQs are full with micro-operations waiting for issuing, and the despatch stage is not empty",
+ "EventCode": "0xD9",
+ "EventName": "DP_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the data processing issue queue are busy. This event counts the cycles where all slots in the DP0 and DP1 IQs are full with micro-operations waiting for issuing, and the despatch stage is not empty"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the data engine issue queue are busy. This event is set every time that the data engine rename has at least one valid instruction, excluding No Operations (NOPs), that cannot move to the issue stage because accpt_instr is LOW",
+ "EventCode": "0xDA",
+ "EventName": "DE_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the data engine issue queue are busy. This event is set every time that the data engine rename has at least one valid instruction, excluding No Operations (NOPs), that cannot move to the issue stage because accpt_instr is LOW"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json
new file mode 100644
index 000000000000..cbb365f5091f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json
@@ -0,0 +1,143 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json
new file mode 100644
index 000000000000..1a74786271d4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json
@@ -0,0 +1,77 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json
new file mode 100644
index 000000000000..5aff6e93c1ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json
new file mode 100644
index 000000000000..eeac798d403a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json
new file mode 100644
index 000000000000..a9edd52843a1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json
new file mode 100644
index 000000000000..5aff6e93c1ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json
new file mode 100644
index 000000000000..a9edd52843a1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json
new file mode 100644
index 000000000000..5aff6e93c1ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json
new file mode 100644
index 000000000000..964f47c6b099
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json
@@ -0,0 +1,134 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json
new file mode 100644
index 000000000000..7b2b21ac150f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json
@@ -0,0 +1,41 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json
new file mode 100644
index 000000000000..3116135c59e2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/cache.json
new file mode 100644
index 000000000000..3ad15e3a93a9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/cache.json
@@ -0,0 +1,107 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/instruction.json
new file mode 100644
index 000000000000..6c3b8f772e7f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/instruction.json
@@ -0,0 +1,65 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/memory.json
new file mode 100644
index 000000000000..78ed6dfcedc1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/pipeline.json
new file mode 100644
index 000000000000..eeac798d403a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/pipeline.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/spe.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/spe.json
new file mode 100644
index 000000000000..20f2165c85fe
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-e1/spe.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "SAMPLE_POP"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FILTRATE"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_COLLISION"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
index 80d7a70829a0..492083b99256 100644
--- a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
+++ b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
@@ -36,6 +36,18 @@
"BriefDescription": "Attributable Level 1 data TLB refill"
},
{
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, load",
+ "EventCode": "0x06",
+ "EventName": "LD_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, load"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, store",
+ "EventCode": "0x07",
+ "EventName": "ST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, store"
+ },
+ {
"PublicDescription": "Instruction architecturally executed",
"EventCode": "0x08",
"EventName": "INST_RETIRED",
@@ -60,6 +72,30 @@
"BriefDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR"
},
{
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, software change of the PC",
+ "EventCode": "0x0C",
+ "EventName": "PC_WRITE_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, software change of the PC"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, immediate branch",
+ "EventCode": "0x0D",
+ "EventName": "BR_IMMED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, immediate branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, procedure return",
+ "EventCode": "0x0E",
+ "EventName": "BR_RETURN_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, procedure return"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, unaligned",
+ "EventCode": "0x0F",
+ "EventName": "UNALIGNED_LDST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, unaligned"
+ },
+ {
"PublicDescription": "Mispredicted or not predicted branch speculatively executed",
"EventCode": "0x10",
"EventName": "BR_MIS_PRED",
@@ -144,6 +180,12 @@
"BriefDescription": "Bus cycle"
},
{
+ "PublicDescription": "Level 1 data cache allocation without refill",
+ "EventCode": "0x1F",
+ "EventName": "L1D_CACHE_ALLOCATE",
+ "BriefDescription": "Level 1 data cache allocation without refill"
+ },
+ {
"PublicDescription": "Attributable Level 2 data cache allocation without refill",
"EventCode": "0x20",
"EventName": "L2D_CACHE_ALLOCATE",
@@ -258,6 +300,12 @@
"BriefDescription": "Last level cache miss, read"
},
{
+ "PublicDescription": "Attributable memory read access to another socket in a multi-socket system",
+ "EventCode": "0x38",
+ "EventName": "REMOTE_ACCESS_RD",
+ "BriefDescription": "Attributable memory read access to another socket in a multi-socket system"
+ },
+ {
"PublicDescription": "Level 1 data cache long-latency read miss. The counter counts each memory read access counted by L1D_CACHE that incurs additional latency because it returns data from outside the Level 1 data or unified cache of this processing element.",
"EventCode": "0x39",
"EventName": "L1D_CACHE_LMISS_RD",
@@ -360,6 +408,24 @@
"BriefDescription": "Trace buffer current write pointer wrapped"
},
{
+ "PublicDescription": "PMU overflow, counters accessible to EL1 and EL0",
+ "EventCode": "0x400D",
+ "EventName": "PMU_OVFS",
+ "BriefDescription": "PMU overflow, counters accessible to EL1 and EL0"
+ },
+ {
+ "PublicDescription": "Trace buffer Trigger Event",
+ "EventCode": "0x400E",
+ "EventName": "TRB_TRIG",
+ "BriefDescription": "Trace buffer Trigger Event"
+ },
+ {
+ "PublicDescription": "PMU overflow, counters reserved for use by EL2",
+ "EventCode": "0x400F",
+ "EventName": "PMU_HOVFS",
+ "BriefDescription": "PMU overflow, counters reserved for use by EL2"
+ },
+ {
"PublicDescription": "PE Trace Unit external output 0",
"EventCode": "0x4010",
"EventName": "TRCEXTOUT0",
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index b899db48c12a..ed29e4433c67 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -12,14 +12,27 @@
#
#
#Family-model,Version,Filename,EventType
+0x00000000410fd020,v1,arm/cortex-a34,core
0x00000000410fd030,v1,arm/cortex-a53,core
0x00000000420f1000,v1,arm/cortex-a53,core
+0x00000000410fd040,v1,arm/cortex-a35,core
+0x00000000410fd050,v1,arm/cortex-a55,core
+0x00000000410fd060,v1,arm/cortex-a65,core
0x00000000410fd070,v1,arm/cortex-a57-a72,core
0x00000000410fd080,v1,arm/cortex-a57-a72,core
+0x00000000410fd090,v1,arm/cortex-a73,core
+0x00000000410fd0a0,v1,arm/cortex-a75,core
0x00000000410fd0b0,v1,arm/cortex-a76-n1,core
0x00000000410fd0c0,v1,arm/cortex-a76-n1,core
+0x00000000410fd0d0,v1,arm/cortex-a77,core
0x00000000410fd400,v1,arm/neoverse-v1,core
+0x00000000410fd410,v1,arm/cortex-a78,core
+0x00000000410fd440,v1,arm/cortex-x1,core
+0x00000000410fd460,v1,arm/cortex-a510,core
+0x00000000410fd470,v1,arm/cortex-a710,core
+0x00000000410fd480,v1,arm/cortex-x2,core
0x00000000410fd490,v1,arm/neoverse-n2,core
+0x00000000410fd4a0,v1,arm/neoverse-e1,core
0x00000000420f5160,v1,cavium/thunderx2,core
0x00000000430f0af0,v1,cavium/thunderx2,core
0x00000000460f0010,v1,fujitsu/a64fx,core
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
index 86bd8ba9391d..bf6a9811e014 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
@@ -4,125 +4,125 @@
"EventCode": "128",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "L1I_L3_LOCAL_WRITES",
"BriefDescription": "L1I L3 Local Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1D_L3_LOCAL_WRITES",
"BriefDescription": "L1D L3 Local Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1I_L3_REMOTE_WRITES",
"BriefDescription": "L1I L3 Remote Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L3_REMOTE_WRITES",
"BriefDescription": "L1D L3 Remote Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_CACHELINE_INVALIDATES",
"BriefDescription": "L1I Cacheline Invalidates",
- "PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache"
+ "PublicDescription": "A cache line in the Level-1 Instruction Cache has been invalidated by a store on the same CPU as the Level-1 Instruction Cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
- "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+ "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
- "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress"
+ "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
- "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache"
+ "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
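
The crypto counters above come in matched pairs: a Function Count and a Blocked Function Count per coprocessor, where "blocked" means the shared DEA/AES or SHA coprocessor was busy with a function issued by another CPU. A sketch of a derived contention ratio, assuming counter values have already been collected (the numbers below are invented; on real hardware they would come from the cpum_cf counters, e.g. via perf):

    # Illustrative only: invented sample values for two of the counters above.
    counts = {"SHA_FUNCTIONS": 120_000, "SHA_BLOCKED_FUNCTIONS": 3_600}

    # Fraction of SHA functions that had to wait for another CPU's work.
    blocked_ratio = counts["SHA_BLOCKED_FUNCTIONS"] / counts["SHA_FUNCTIONS"]
    print(f"SHA functions blocked behind another CPU: {blocked_ratio:.1%}")
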
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
index 1a5e4f89c57e..99c1b93a7e36 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
@@ -11,7 +11,7 @@
"EventCode": "129",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
@@ -25,7 +25,7 @@
"EventCode": "131",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
@@ -39,63 +39,63 @@
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
- "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+ "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
@@ -109,273 +109,273 @@
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Node Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Node L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "176",
"EventName": "L1I_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Node Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "218",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "219",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "220",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
index fc762e9f1d6e..1023d47028ce 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
@@ -3,56 +3,56 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
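
The index lines show the same blob hashes (3f28007d3892..a8d391ddeb8c) for both cf_z13/crypto.json and cf_z14/crypto.json, so the two crypto event lists are byte-identical before and after this change. A sketch of a sync check one could run against a kernel tree — the guard itself is an assumption for illustration, not existing kernel tooling:

    import filecmp

    # Paths as they appear in this patch, relative to the kernel source root.
    z13 = "tools/perf/pmu-events/arch/s390/cf_z13/crypto.json"
    z14 = "tools/perf/pmu-events/arch/s390/cf_z14/crypto.json"

    # shallow=False forces a byte-by-byte comparison instead of a stat check.
    assert filecmp.cmp(z13, z14, shallow=False), "z13/z14 crypto.json drifted apart"
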
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index 4942b20a1ea1..ad40cc4f9727 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -4,357 +4,357 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
- "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
- "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
- "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
- "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
- "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
},
{
"Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG."
},
{
"Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
- "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+ "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
},
{
"Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
- "PublicDescription": "Last Host Translation done"
+ "PublicDescription": "Last Host Translation done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
index fc762e9f1d6e..1023d47028ce 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
@@ -3,56 +3,56 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
deleted file mode 100644
index 3f28007d3892..000000000000
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
+++ /dev/null
@@ -1,114 +0,0 @@
-[
- {
- "Unit": "CPU-M-CF",
- "EventCode": "64",
- "EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "65",
- "EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "66",
- "EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "67",
- "EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "68",
- "EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "69",
- "EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "70",
- "EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "71",
- "EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "72",
- "EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "73",
- "EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "74",
- "EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "75",
- "EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "76",
- "EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "77",
- "EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "78",
- "EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "79",
- "EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- }
-]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
index ad79189050a0..8b4380b8e489 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
@@ -1,6 +1,118 @@
[
{
"Unit": "CPU-M-CF",
+ "EventCode": "64",
+ "EventName": "PRNG_FUNCTIONS",
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "65",
+ "EventName": "PRNG_CYCLES",
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "66",
+ "EventName": "PRNG_BLOCKED_FUNCTIONS",
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "67",
+ "EventName": "PRNG_BLOCKED_CYCLES",
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "68",
+ "EventName": "SHA_FUNCTIONS",
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "69",
+ "EventName": "SHA_CYCLES",
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "70",
+ "EventName": "SHA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "71",
+ "EventName": "SHA_BLOCKED_CYCLES",
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "72",
+ "EventName": "DEA_FUNCTIONS",
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "73",
+ "EventName": "DEA_CYCLES",
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "74",
+ "EventName": "DEA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "75",
+ "EventName": "DEA_BLOCKED_CYCLES",
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "76",
+ "EventName": "AES_FUNCTIONS",
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "77",
+ "EventName": "AES_CYCLES",
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "78",
+ "EventName": "AES_BLOCKED_FUNCTIONS",
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "79",
+ "EventName": "AES_BLOCKED_CYCLES",
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
"EventCode": "80",
"EventName": "ECC_FUNCTION_COUNT",
"BriefDescription": "ECC Function Count",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
index 8ac61f8f286b..9c691c391086 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
@@ -4,357 +4,357 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
- "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page"
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
- "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
- "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
- "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
- "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
},
{
"Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG."
},
{
"Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
- "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+ "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
},
{
"Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
- "PublicDescription": "Last Host Translation done"
+ "PublicDescription": "Last Host Translation done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
@@ -374,15 +374,15 @@
"Unit": "CPU-M-CF",
"EventCode": "264",
"EventName": "DFLT_CC",
- "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed",
+ "BriefDescription": "Increments DEFLATE CONVERSION CALL",
"PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed"
},
{
"Unit": "CPU-M-CF",
"EventCode": "265",
"EventName": "DFLT_CCFINISH",
- "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2",
- "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2"
+ "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+ "PublicDescription": " Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2 complete. "
},
{
"Unit": "CPU-M-CF",
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/basic.json b/tools/perf/pmu-events/arch/s390/cf_z16/basic.json
new file mode 100644
index 000000000000..1023d47028ce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/basic.json
@@ -0,0 +1,58 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "0",
+ "EventName": "CPU_CYCLES",
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "1",
+ "EventName": "INSTRUCTIONS",
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "2",
+ "EventName": "L1I_DIR_WRITES",
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "3",
+ "EventName": "L1I_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "4",
+ "EventName": "L1D_DIR_WRITES",
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "5",
+ "EventName": "L1D_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "32",
+ "EventName": "PROBLEM_STATE_CPU_CYCLES",
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "33",
+ "EventName": "PROBLEM_STATE_INSTRUCTIONS",
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json
new file mode 100644
index 000000000000..8b4380b8e489
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json
@@ -0,0 +1,142 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "64",
+ "EventName": "PRNG_FUNCTIONS",
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "65",
+ "EventName": "PRNG_CYCLES",
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "66",
+ "EventName": "PRNG_BLOCKED_FUNCTIONS",
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "67",
+ "EventName": "PRNG_BLOCKED_CYCLES",
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "68",
+ "EventName": "SHA_FUNCTIONS",
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "69",
+ "EventName": "SHA_CYCLES",
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "70",
+ "EventName": "SHA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "71",
+ "EventName": "SHA_BLOCKED_CYCLES",
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "72",
+ "EventName": "DEA_FUNCTIONS",
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "73",
+ "EventName": "DEA_CYCLES",
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "74",
+ "EventName": "DEA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "75",
+ "EventName": "DEA_BLOCKED_CYCLES",
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "76",
+ "EventName": "AES_FUNCTIONS",
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "77",
+ "EventName": "AES_CYCLES",
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "78",
+ "EventName": "AES_BLOCKED_FUNCTIONS",
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "79",
+ "EventName": "AES_BLOCKED_CYCLES",
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "80",
+ "EventName": "ECC_FUNCTION_COUNT",
+ "BriefDescription": "ECC Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "81",
+ "EventName": "ECC_CYCLES_COUNT",
+ "BriefDescription": "ECC Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the ECC coprocessor is busy performing the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "82",
+ "EventName": "ECC_BLOCKED_FUNCTION_COUNT",
+ "BriefDescription": "Ecc Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions that are issued by the CPU and are blocked because the ECC coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "83",
+ "EventName": "ECC_BLOCKED_CYCLES_COUNT",
+ "BriefDescription": "ECC Blocked Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the elliptic-curve cryptography (ECC) functions issued by the CPU because the ECC coprocessor is busy performing a function issued by another CPU."
+ }
+]
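The crypto counters above come in busy/blocked pairs: dividing the blocked-cycle count by the sum of busy and blocked cycles approximates how often a CPU had to wait for the shared coprocessor. A minimal sketch of reading the AES pair with perf stat, assuming these counters are exposed through the s390 cpum_cf PMU as on recent kernels ("./workload" is a placeholder):

    # Count AES busy and blocked coprocessor cycles for one run.
    perf stat -e cpum_cf/AES_CYCLES/,cpum_cf/AES_BLOCKED_CYCLES/ -- ./workload
    # Contention ratio, computed by hand from the two totals:
    #   AES_BLOCKED_CYCLES / (AES_CYCLES + AES_BLOCKED_CYCLES)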
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
new file mode 100644
index 000000000000..c306190fc06f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
@@ -0,0 +1,492 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "128",
+ "EventName": "L1D_RO_EXCL_WRITES",
+ "BriefDescription": "L1D Read-only Exclusive Writes",
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "129",
+ "EventName": "DTLB2_WRITES",
+ "BriefDescription": "DTLB2 Writes",
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the Level-1 Data cache. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "130",
+ "EventName": "DTLB2_MISSES",
+ "BriefDescription": "DTLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "131",
+ "EventName": "CRSTE_1MB_WRITES",
+ "BriefDescription": "One Megabyte CRSTE writes",
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "132",
+ "EventName": "DTLB2_GPAGE_WRITES",
+ "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "134",
+ "EventName": "ITLB2_WRITES",
+ "BriefDescription": "ITLB2 Writes",
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "135",
+ "EventName": "ITLB2_MISSES",
+ "BriefDescription": "ITLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "137",
+ "EventName": "TLB2_PTE_WRITES",
+ "BriefDescription": "TLB2 Page Table Entry Writes",
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "138",
+ "EventName": "TLB2_CRSTE_WRITES",
+ "BriefDescription": "TLB2 Combined Region and Segment Entry Writes",
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "139",
+ "EventName": "TLB2_ENGINES_BUSY",
+ "BriefDescription": "TLB2 Engines Busy",
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "140",
+ "EventName": "TX_C_TEND",
+ "BriefDescription": "Completed TEND instructions in constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "141",
+ "EventName": "TX_NC_TEND",
+ "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "143",
+ "EventName": "L1C_TLB2_MISSES",
+ "BriefDescription": "L1C TLB2 Misses",
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "145",
+ "EventName": "DCW_REQ",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "146",
+ "EventName": "DCW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "147",
+ "EventName": "DCW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "148",
+ "EventName": "DCW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "149",
+ "EventName": "DCW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "150",
+ "EventName": "DCW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "151",
+ "EventName": "DCW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "152",
+ "EventName": "DCW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "153",
+ "EventName": "DCW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "154",
+ "EventName": "DCW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "155",
+ "EventName": "DCW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "156",
+ "EventName": "DCW_ON_CHIP_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "157",
+ "EventName": "DCW_ON_MODULE_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Module memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "158",
+ "EventName": "DCW_ON_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "159",
+ "EventName": "DCW_OFF_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "160",
+ "EventName": "IDCW_ON_MODULE_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "161",
+ "EventName": "IDCW_ON_MODULE_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using chip horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "162",
+ "EventName": "IDCW_ON_MODULE_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "163",
+ "EventName": "IDCW_ON_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "164",
+ "EventName": "IDCW_ON_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "165",
+ "EventName": "IDCW_ON_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "166",
+ "EventName": "IDCW_OFF_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "167",
+ "EventName": "IDCW_OFF_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "168",
+ "EventName": "IDCW_OFF_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "169",
+ "EventName": "ICW_REQ",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced the requestors Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "170",
+ "EventName": "ICW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "171",
+ "EventName": "ICW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "172",
+ "EventName": "ICW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "173",
+ "EventName": "ICW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "174",
+ "EventName": "ICW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "175",
+ "EventName": "ICW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "176",
+ "EventName": "ICW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip level 2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "177",
+ "EventName": "ICW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "178",
+ "EventName": "ICW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "179",
+ "EventName": "ICW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "180",
+ "EventName": "ICW_ON_CHIP_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "181",
+ "EventName": "ICW_ON_MODULE_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Module memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "182",
+ "EventName": "ICW_ON_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "183",
+ "EventName": "ICW_OFF_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "224",
+ "EventName": "BCD_DFP_EXECUTION_SLOTS",
+ "BriefDescription": "Binary Coded Decimal to Decimal Floating Point conversions",
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "225",
+ "EventName": "VX_BCD_EXECUTION_SLOTS",
+ "BriefDescription": "Count finished vector arithmetic Binary Coded Decimal instructions",
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "226",
+ "EventName": "DECIMAL_INSTRUCTIONS",
+ "BriefDescription": "Decimal instruction dispatched",
+ "PublicDescription": "Decimal instruction dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "232",
+ "EventName": "LAST_HOST_TRANSLATIONS",
+ "BriefDescription": "Last host translation done",
+ "PublicDescription": "Last Host Translation done"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "244",
+ "EventName": "TX_NC_TABORT",
+ "BriefDescription": "Aborted transactions in unconstrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "245",
+ "EventName": "TX_C_TABORT_NO_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "246",
+ "EventName": "TX_C_TABORT_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "248",
+ "EventName": "DFLT_ACCESS",
+ "BriefDescription": "Cycles CPU spent obtaining access to Deflate unit",
+ "PublicDescription": "Cycles CPU spent obtaining access to Deflate unit"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "253",
+ "EventName": "DFLT_CYCLES",
+ "BriefDescription": "Cycles CPU is using Deflate unit",
+ "PublicDescription": "Cycles CPU is using Deflate unit"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "256",
+ "EventName": "SORTL",
+ "BriefDescription": "Count SORTL instructions",
+ "PublicDescription": "Increments by one for every SORT LISTS instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "265",
+ "EventName": "DFLT_CC",
+ "BriefDescription": "Increments DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "266",
+ "EventName": "DFLT_CCFINISH",
+ "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "267",
+ "EventName": "NNPA_INVOCATIONS",
+ "BriefDescription": "NNPA Total invocations",
+ "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "268",
+ "EventName": "NNPA_COMPLETIONS",
+ "BriefDescription": "NNPA Total completions",
+ "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed that ended in Condition Codes 0, 1 or 2."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "269",
+ "EventName": "NNPA_WAIT_LOCK",
+ "BriefDescription": "Cycles spent obtaining NNPA lock",
+ "PublicDescription": "Cycles CPU spent obtaining access to IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "270",
+ "EventName": "NNPA_HOLD_LOCK",
+ "BriefDescription": "Cycles spent holding NNPA lock",
+ "PublicDescription": "Cycles CPU is using IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "448",
+ "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
+ "BriefDescription": "Cycle count with one thread active",
+ "PublicDescription": "Cycle count with one thread active"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "449",
+ "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
+ "BriefDescription": "Cycle count with two threads active",
+ "PublicDescription": "Cycle count with two threads active"
+ }
+]
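Several of the z16 counters above form invocation/completion pairs (DFLT_CC and DFLT_CCFINISH, NNPA_INVOCATIONS and NNPA_COMPLETIONS), where the completion counter only increments for executions ending in condition codes 0, 1 or 2. A hedged sketch of comparing the NNPA pair, again assuming the cpum_cf PMU exposes these events ("./workload" is a placeholder):

    # Compare NNPA invocations against completions (CC 0, 1 or 2).
    perf stat -e cpum_cf/NNPA_INVOCATIONS/,cpum_cf/NNPA_COMPLETIONS/ -- ./workload
    # A completion total well below the invocation total means many
    # executions ended with a higher condition code and were re-driven.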
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
new file mode 100644
index 000000000000..1a0034f79f73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
@@ -0,0 +1,7 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ }
+]
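The metric simply sums completed and aborted transactions across both transactional-execution modes. Once this JSON ships with perf, the metric can be requested by name rather than by listing the five events; a sketch, with "./workload" as a placeholder:

    # Evaluate the named metric defined in transaction.json.
    perf stat -M transaction -- ./workload
    # Equivalent to summing the five raw events:
    #   transaction = TX_C_TEND + TX_NC_TEND + TX_NC_TABORT
    #               + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL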
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
index 86b29fd181cf..6ebbdbaf7951 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
@@ -4,14 +4,14 @@
"EventCode": "128",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from the Level-2 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 cache."
},
{
"Unit": "CPU-M-CF",
@@ -32,139 +32,139 @@
"EventCode": "133",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
- "PublicDescription": "Incremented by one for every store sent to Level-2 cache"
+ "PublicDescription": "Incremented by one for every store sent to Level-2 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
index 783de7f1aeaa..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
index 3f28007d3892..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
}
]
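Once these JSON aliases are built into perf, the crypto counters above can be read per workload. As a minimal sketch (assuming the usual s390 counter-facility PMU name, cpum_cf, and that the event aliases resolve as listed), a blocked-function ratio could be derived like so:

    # Hypothetical counter values; on real hardware they would come from
    # something like:
    #   perf stat -e cpum_cf/SHA_FUNCTIONS/,cpum_cf/SHA_BLOCKED_FUNCTIONS/ -- cmd
    sha_functions = 1_000_000
    sha_blocked = 25_000
    # Fraction of SHA requests that stalled because another CPU held the
    # coprocessor (see SHA_BLOCKED_FUNCTIONS above).
    print(f"SHA blocked ratio: {sha_blocked / sha_functions:.2%}")  # 2.50%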
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
index f40cbed89418..9e765581382b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
@@ -18,230 +18,230 @@
"EventCode": "130",
"EventName": "L1D_L2I_SOURCED_WRITES",
"BriefDescription": "L1D L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
}
]
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index 61641a3480e0..a918e1af77a5 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -5,3 +5,4 @@ Family-model,Version,Filename,EventType
^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
+^IBM.393[12].*3\.7.[[:xdigit:]]+$,3,cf_z16,core
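The new mapfile row selects the cf_z16 event directory by matching the cpuid string perf constructs on s390. A minimal sketch of what the row matches, assuming a cpuid of the form IBM,<machine>,...,<version>,<hex authorization> (the sample string below is invented):

    import re

    # POSIX [[:xdigit:]] rewritten as [0-9a-fA-F]; perf applies the
    # mapfile regex itself, this only illustrates the match.
    z16_row = re.compile(r"^IBM.393[12].*3\.7.[0-9a-fA-F]+$")
    print(bool(z16_row.match("IBM,3931,704,M03,3.7,002f")))  # True -> cf_z16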
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
new file mode 100644
index 000000000000..f8bdf7812b51
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -0,0 +1,792 @@
+[
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * (( BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) ) / TOPDOWN.SLOTS)",
+ "MetricGroup": "Ret",
+ "MetricName": "Branching_Overhead",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "IPC",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
+ "MetricGroup": "Pipeline;Mem",
+ "MetricName": "CPI",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "CLKS",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1",
+ "MetricName": "SLOTS",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
+ "MetricGroup": "SMT;TmaL1",
+ "MetricName": "Slots_Utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "Execute_per_Issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Ret;SMT;TmaL1",
+ "MetricName": "CoreIPC",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Ret;Flops",
+ "MetricName": "FLOPc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "( FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5 ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "FP_Arith_Utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "ILP",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "IpLoad",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "IpStore",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "IpBranch",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "IpCall",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO",
+ "MetricName": "IpTB",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "BpTkBranch",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "IpFLOP",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) )",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "IpArith",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_SP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_DP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX128",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX256",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1",
+ "MetricName": "Instructions",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Strings_Cycles",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "IpAssist",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "Fetch_UpC",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricExpr": "LSD.UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "Fed;LSD",
+ "MetricName": "LSD_Coverage",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW",
+ "MetricName": "DSB_Coverage",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "IpDSB_Miss_Ret",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_NT",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_TK",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "CallRet",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Jump",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - ( (BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES) + ((BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES) )",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Other_Branches",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBound;MemoryBW",
+ "MetricName": "MLP",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI_Load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;Backend;CacheMisses",
+ "MetricName": "L2MPKI",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses;Offcore",
+ "MetricName": "L2MPKI_All",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2MPKI_Load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_All",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_Load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L3MPKI",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "FB_HPKI",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricConstraint": "NO_NMI_WATCHDOG",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 4 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "Page_Walks_Utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "CPU_Utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time",
+ "MetricGroup": "Summary;Power",
+ "MetricName": "Average_Frequency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 ) / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0",
+ "MetricGroup": "SMT",
+ "MetricName": "SMT_2T_Utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_Utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_CPI",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricName": "DRAM_BW_Use",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of parallel requests to external memory. Accounts for all requests",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / arb@event\\=0x81\\,umask\\=0x1@",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "MEM_Parallel_Requests",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "IpFarBranch",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Frontend_Bound",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Bad_Speculation",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricConstraint": "NO_NMI_WATCHDOG",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "(TOPDOWN_BE_BOUND.ALL / (5 * CPU_CLK_UNHALTED.CORE))",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Backend_Bound_Aux",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation. ",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the numer of issue slots that result in retirement slots. ",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "Retiring",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE",
+ "MetricName": "CLKS",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE_P",
+ "MetricName": "CLKS_P",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "5 * CPU_CLK_UNHALTED.CORE",
+ "MetricName": "SLOTS",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "IPC",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / INST_RETIRED.ANY",
+ "MetricName": "CPI",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.ALL / INST_RETIRED.ANY",
+ "MetricName": "UPI",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "Store_Fwd_Blocks",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a address aliasing block",
+ "MetricExpr": "100 * LD_BLOCKS.4K_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "Address_Alias_Blocks",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.SPLIT_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "Load_Splits",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "IpBranch",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
+ "MetricName": "IpCall",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "IpLoad",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricName": "IpStore",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricName": "IpMispredict",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch",
+ "MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
+ "MetricName": "IpFarBranch",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Ratio of all branches which mispredict",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricName": "Branch_Mispredict_Ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+ "MetricName": "Branch_Mispredict_to_Unknown_Branch_Ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are ucode ops",
+ "MetricExpr": "100 * UOPS_RETIRED.MS / UOPS_RETIRED.ALL",
+ "MetricName": "Microcode_Uop_Ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.FPDIV / UOPS_RETIRED.ALL",
+ "MetricName": "FPDiv_Uop_Ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.IDIV / UOPS_RETIRED.ALL",
+ "MetricName": "IDiv_Uop_Ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * UOPS_RETIRED.X87 / UOPS_RETIRED.ALL",
+ "MetricName": "X87_Uop_Ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricName": "Turbo_Utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@k / CPU_CLK_UNHALTED.CORE",
+ "MetricName": "Kernel_Utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricName": "CPU_Utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Estimated Pause cost. In percent",
+ "MetricExpr": "100 * SERIALIZATION.NON_C01_MS_SCB / (5 * CPU_CLK_UNHALTED.CORE)",
+ "MetricName": "Estimated_Pause_Cost",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycle cost per L2 hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricName": "Cycles_per_Demand_Load_L2_Hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycle cost per LLC hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricName": "Cycles_per_Demand_Load_L3_Hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycle cost per DRAM hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "MetricName": "Cycles_per_Demand_Load_DRAM_Hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_L2_HIT / ( MEM_BOUND_STALLS.IFETCH )",
+ "MetricName": "Inst_Miss_Cost_L2Hit_Percent",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_LLC_HIT / ( MEM_BOUND_STALLS.IFETCH )",
+ "MetricName": "Inst_Miss_Cost_L3Hit_Percent",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in DRAM",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_DRAM_HIT / ( MEM_BOUND_STALLS.IFETCH )",
+ "MetricName": "Inst_Miss_Cost_DRAMHit_Percent",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "load ops retired per 1000 instruction",
+ "MetricExpr": "1000 * MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricName": "MemLoadPKI",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "(cstate_core@c1\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c8\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c9\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c10\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency"
+ }
+]
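Each MetricExpr in the file above is evaluated by substituting raw counter values and computing the arithmetic. A minimal sketch of the cpu_atom TopdownL1 step, with invented counter values (the five-wide slot multiplier comes from the SLOTS metric above):

    # Hypothetical counts; perf stat would supply the real values.
    counts = {
        "TOPDOWN_RETIRING.ALL": 3_200_000_000,
        "CPU_CLK_UNHALTED.CORE": 1_000_000_000,
    }

    slots = 5 * counts["CPU_CLK_UNHALTED.CORE"]        # the "SLOTS" metric
    retiring = counts["TOPDOWN_RETIRING.ALL"] / slots  # the "Retiring" metric
    print(f"Retiring fraction: {retiring:.2%}")        # 64.00%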
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
index aa906a7fa520..fcaa487b8737 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
@@ -611,7 +611,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -624,7 +623,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -637,7 +635,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -650,7 +647,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -663,7 +659,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -676,7 +671,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -689,7 +683,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -702,7 +695,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -715,7 +707,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -728,7 +719,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -741,7 +731,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -754,7 +743,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -767,7 +755,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -780,7 +767,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -793,7 +779,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -806,7 +791,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -819,7 +803,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -832,7 +815,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -845,7 +827,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -858,7 +839,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -871,7 +851,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -884,7 +863,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -897,7 +875,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -910,7 +887,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -923,7 +899,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -936,7 +911,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -949,7 +923,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -962,7 +935,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -975,7 +947,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -988,7 +959,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1001,7 +971,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1014,7 +983,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1027,7 +995,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1040,7 +1007,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1053,7 +1019,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1066,7 +1031,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1079,7 +1043,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1092,7 +1055,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1105,7 +1067,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1118,7 +1079,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1131,7 +1091,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1144,7 +1103,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1157,7 +1115,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1170,7 +1127,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1183,7 +1139,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1196,7 +1151,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1209,7 +1163,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1222,7 +1175,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1235,7 +1187,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1248,7 +1199,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1261,7 +1211,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1274,7 +1223,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1287,7 +1235,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1300,7 +1247,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1313,7 +1259,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1326,7 +1271,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1339,7 +1283,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1352,7 +1295,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1365,7 +1307,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1378,7 +1319,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1391,7 +1331,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1404,7 +1343,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1417,7 +1355,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1430,7 +1367,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1443,7 +1379,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1456,7 +1391,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1469,7 +1403,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1482,7 +1415,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1495,7 +1427,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1508,7 +1439,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1521,7 +1451,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1534,7 +1463,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1547,7 +1475,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1560,7 +1487,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1573,7 +1499,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1586,7 +1511,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1599,7 +1523,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1612,7 +1535,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1625,7 +1547,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1638,7 +1559,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1651,7 +1571,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1664,7 +1583,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1677,7 +1595,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1690,7 +1607,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1703,7 +1619,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1716,7 +1631,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1729,7 +1643,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1742,7 +1655,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1755,7 +1667,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1768,7 +1679,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1781,7 +1691,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1794,7 +1703,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1807,7 +1715,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1820,7 +1727,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1833,7 +1739,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1846,7 +1751,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1859,7 +1763,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1872,7 +1775,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1885,7 +1787,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1898,7 +1799,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1911,7 +1811,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1924,7 +1823,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1937,7 +1835,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1950,7 +1847,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1963,7 +1859,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1976,7 +1871,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1989,7 +1883,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2002,7 +1895,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2015,7 +1907,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2028,7 +1919,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2041,7 +1931,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2054,7 +1943,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2067,7 +1955,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2080,7 +1967,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2093,7 +1979,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2106,7 +1991,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2119,7 +2003,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F800807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2132,7 +2015,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2145,7 +2027,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2158,7 +2039,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2171,7 +2051,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2184,7 +2063,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2197,7 +2075,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2210,7 +2087,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F802007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2223,7 +2099,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2236,7 +2111,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2249,7 +2123,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2262,7 +2135,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2275,7 +2147,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2288,7 +2159,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x802007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2301,7 +2171,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F800407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2314,7 +2183,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2327,7 +2195,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2340,7 +2207,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2353,7 +2219,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2366,7 +2231,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2379,7 +2243,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2392,7 +2255,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F801007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2405,7 +2267,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2418,7 +2279,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2431,7 +2291,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2444,7 +2303,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2457,7 +2315,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2470,7 +2327,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2483,7 +2339,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2496,7 +2351,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2509,7 +2363,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2522,7 +2375,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2535,7 +2387,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2548,7 +2399,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2561,7 +2411,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2574,7 +2423,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2587,7 +2435,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2600,7 +2447,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2613,7 +2459,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2626,7 +2471,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2639,7 +2483,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2652,7 +2495,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2665,7 +2507,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2678,7 +2519,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2691,7 +2531,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2704,7 +2543,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2717,7 +2555,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2730,7 +2567,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2743,7 +2579,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2756,7 +2591,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2769,7 +2603,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2782,7 +2615,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2795,7 +2627,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2808,7 +2639,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2821,7 +2651,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2834,7 +2663,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2847,7 +2675,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2860,7 +2687,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2873,7 +2699,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2886,7 +2711,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2899,7 +2723,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2912,7 +2735,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2925,7 +2747,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2938,7 +2759,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2951,7 +2771,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2964,7 +2783,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2977,7 +2795,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2990,7 +2807,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3003,7 +2819,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3016,7 +2831,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3029,7 +2843,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3042,7 +2855,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3055,7 +2867,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3068,7 +2879,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3081,7 +2891,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3094,7 +2903,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3107,7 +2915,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3120,7 +2927,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3133,7 +2939,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3146,7 +2951,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3159,7 +2963,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3172,7 +2975,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3185,7 +2987,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3198,7 +2999,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3211,7 +3011,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3224,7 +3023,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3237,7 +3035,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3250,7 +3047,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3263,7 +3059,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3276,7 +3071,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3289,7 +3083,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3302,7 +3095,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3315,7 +3107,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3328,7 +3119,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3341,7 +3131,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3354,7 +3143,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3367,7 +3155,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3380,7 +3167,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3393,7 +3179,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3406,7 +3191,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3419,7 +3203,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3432,7 +3215,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3445,7 +3227,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3458,7 +3239,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3471,7 +3251,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3484,7 +3263,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3497,7 +3275,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3510,7 +3287,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3523,7 +3299,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3536,7 +3311,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3549,7 +3323,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3562,7 +3335,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3575,7 +3347,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3588,7 +3359,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3601,7 +3371,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3614,7 +3383,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3627,7 +3395,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3640,7 +3407,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3653,7 +3419,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3666,7 +3431,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3679,7 +3443,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3692,7 +3455,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3705,7 +3467,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3718,7 +3479,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3731,7 +3491,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3744,7 +3503,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3757,7 +3515,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3770,7 +3527,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3783,7 +3539,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3796,7 +3551,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3809,7 +3563,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3822,7 +3575,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3835,7 +3587,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3848,7 +3599,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3861,7 +3611,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3874,7 +3623,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3887,7 +3635,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3900,7 +3647,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3913,7 +3659,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3926,7 +3671,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3939,7 +3683,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3952,7 +3695,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3965,7 +3707,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3978,7 +3719,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3991,7 +3731,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4004,7 +3743,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4017,7 +3755,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4030,7 +3767,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4043,7 +3779,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4056,7 +3791,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4069,7 +3803,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4082,7 +3815,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4095,7 +3827,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4108,7 +3839,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4121,7 +3851,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4134,7 +3863,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4147,7 +3875,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4160,7 +3887,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4173,7 +3899,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4186,7 +3911,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4199,7 +3923,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4212,7 +3935,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4225,7 +3947,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4238,7 +3959,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4251,7 +3971,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4264,7 +3983,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4277,7 +3995,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4290,7 +4007,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4303,7 +4019,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4316,7 +4031,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4329,7 +4043,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4342,7 +4055,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4355,7 +4067,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4368,7 +4079,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4381,7 +4091,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4394,7 +4103,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4407,7 +4115,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4420,7 +4127,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4433,7 +4139,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4446,7 +4151,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4459,7 +4163,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4472,7 +4175,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4485,7 +4187,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4498,7 +4199,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4511,7 +4211,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4524,7 +4223,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4537,7 +4235,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4550,7 +4247,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4563,7 +4259,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4576,7 +4271,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4589,7 +4283,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4602,7 +4295,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4615,7 +4307,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4628,7 +4319,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4641,7 +4331,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4654,7 +4343,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4667,7 +4355,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4680,7 +4367,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4693,7 +4379,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4706,7 +4391,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4719,7 +4403,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4732,7 +4415,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4745,7 +4427,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4758,7 +4439,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4771,7 +4451,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4784,7 +4463,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4797,7 +4475,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4810,7 +4487,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4823,7 +4499,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4836,7 +4511,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4849,7 +4523,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4862,7 +4535,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4875,7 +4547,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4888,7 +4559,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4901,7 +4571,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4914,7 +4583,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4927,7 +4595,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4940,7 +4607,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4953,7 +4619,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4966,7 +4631,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4979,7 +4643,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4992,7 +4655,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5005,7 +4667,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5018,7 +4679,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5031,7 +4691,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5044,7 +4703,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5057,7 +4715,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5070,7 +4727,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5083,7 +4739,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5096,7 +4751,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5109,7 +4763,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5122,7 +4775,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5135,7 +4787,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5148,7 +4799,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5161,7 +4811,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5174,7 +4823,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5187,7 +4835,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5200,7 +4847,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5213,7 +4859,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5226,7 +4871,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5239,7 +4883,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5252,7 +4895,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5265,7 +4907,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5278,7 +4919,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5291,7 +4931,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5304,7 +4943,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5317,7 +4955,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5330,7 +4967,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5343,7 +4979,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5356,7 +4991,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5369,7 +5003,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5382,7 +5015,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5395,7 +5027,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5408,7 +5039,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5421,7 +5051,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5434,7 +5063,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5447,7 +5075,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5460,7 +5087,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5473,7 +5099,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5486,7 +5111,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5499,7 +5123,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5512,7 +5135,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5525,7 +5147,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5538,7 +5159,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5551,7 +5171,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5564,7 +5183,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5577,7 +5195,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5590,7 +5207,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5603,7 +5219,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5616,7 +5231,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5629,7 +5243,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5642,7 +5255,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5655,7 +5267,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5668,7 +5279,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5681,7 +5291,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5694,7 +5303,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5707,7 +5315,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5720,7 +5327,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5733,7 +5339,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5746,7 +5351,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5759,7 +5363,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5772,7 +5375,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5785,7 +5387,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5798,7 +5399,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5811,7 +5411,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5824,7 +5423,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5837,7 +5435,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5850,7 +5447,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5863,7 +5459,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5876,7 +5471,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5889,7 +5483,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5902,7 +5495,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5915,7 +5507,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5928,7 +5519,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5941,7 +5531,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5954,7 +5543,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5967,7 +5555,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5980,7 +5567,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5993,7 +5579,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6006,7 +5591,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6019,7 +5603,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6032,7 +5615,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6045,7 +5627,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6058,7 +5639,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6071,7 +5651,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6084,7 +5663,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6097,7 +5675,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6110,7 +5687,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6123,7 +5699,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6136,7 +5711,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6149,7 +5723,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6162,7 +5735,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6175,7 +5747,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6188,7 +5759,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6201,7 +5771,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6214,7 +5783,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6227,7 +5795,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6240,7 +5807,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6253,7 +5819,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6266,7 +5831,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6279,7 +5843,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6292,7 +5855,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6305,7 +5867,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6318,7 +5879,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6331,7 +5891,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6344,7 +5903,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6357,7 +5915,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6370,7 +5927,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6383,7 +5939,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6396,7 +5951,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6409,7 +5963,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6422,7 +5975,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6435,7 +5987,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6448,7 +5999,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6461,7 +6011,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6474,7 +6023,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6487,7 +6035,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6500,7 +6047,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6513,7 +6059,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6526,7 +6071,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6539,7 +6083,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6552,7 +6095,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6565,7 +6107,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6578,7 +6119,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6591,7 +6131,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6604,7 +6143,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6617,7 +6155,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6630,7 +6167,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6643,7 +6179,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6656,7 +6191,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6669,7 +6203,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6682,7 +6215,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6695,7 +6227,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6708,7 +6239,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6721,7 +6251,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6734,7 +6263,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6747,7 +6275,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6760,7 +6287,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6773,7 +6299,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6786,7 +6311,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6799,7 +6323,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6812,7 +6335,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6825,7 +6347,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6838,7 +6359,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6851,7 +6371,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6864,7 +6383,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6877,7 +6395,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6890,7 +6407,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6903,7 +6419,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6916,7 +6431,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6929,7 +6443,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6942,7 +6455,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6955,7 +6467,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6968,7 +6479,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6981,7 +6491,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6994,7 +6503,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7007,7 +6515,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7020,7 +6527,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7033,7 +6539,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7046,7 +6551,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7059,7 +6563,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7072,7 +6575,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7085,7 +6587,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7098,7 +6599,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7111,7 +6611,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7124,7 +6623,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7137,7 +6635,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7150,7 +6647,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7328,7 +6824,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7342,7 +6837,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7356,7 +6850,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7370,7 +6863,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7384,7 +6876,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7398,7 +6889,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7412,7 +6902,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7426,7 +6915,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7440,7 +6928,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7454,7 +6941,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7468,7 +6954,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7482,7 +6967,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7496,7 +6980,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7510,7 +6993,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7524,7 +7006,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7538,7 +7019,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7552,7 +7032,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7566,7 +7045,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7580,7 +7058,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7594,7 +7071,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7608,7 +7084,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7622,7 +7097,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7636,7 +7110,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7650,7 +7123,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7664,7 +7136,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7678,7 +7149,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7692,7 +7162,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7706,7 +7175,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7720,7 +7188,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7734,7 +7201,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7748,7 +7214,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7762,7 +7227,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7776,7 +7240,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7790,7 +7253,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7804,7 +7266,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7818,7 +7279,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7832,7 +7292,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7846,7 +7305,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7860,7 +7318,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7874,7 +7331,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7888,7 +7344,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7902,7 +7357,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7916,7 +7370,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7930,7 +7383,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7944,7 +7396,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7958,7 +7409,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7972,7 +7422,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7986,7 +7435,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8000,7 +7448,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8014,7 +7461,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8028,7 +7474,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8042,7 +7487,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8056,7 +7500,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8070,7 +7513,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8084,7 +7526,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8098,7 +7539,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8112,7 +7552,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8126,7 +7565,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8140,7 +7578,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8154,7 +7591,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8168,7 +7604,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8182,7 +7617,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8196,7 +7630,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8210,7 +7643,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8224,7 +7656,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8238,7 +7669,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8252,7 +7682,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8266,7 +7695,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8280,7 +7708,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8294,7 +7721,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8308,7 +7734,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8322,7 +7747,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8336,7 +7760,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8350,7 +7773,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8364,7 +7786,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8378,7 +7799,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8392,7 +7812,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8406,7 +7825,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8420,7 +7838,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8434,7 +7851,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8448,7 +7864,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8462,7 +7877,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8476,7 +7890,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8490,7 +7903,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8504,7 +7916,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8518,7 +7929,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8532,7 +7942,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8546,7 +7955,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8560,7 +7968,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8574,7 +7981,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8588,7 +7994,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8602,7 +8007,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8616,7 +8020,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8630,7 +8033,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8644,7 +8046,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8658,7 +8059,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8672,7 +8072,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8686,7 +8085,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8700,7 +8098,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8714,7 +8111,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8728,7 +8124,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8742,7 +8137,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8756,7 +8150,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8770,7 +8163,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8784,7 +8176,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8798,7 +8189,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8812,7 +8202,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8826,7 +8215,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8840,7 +8228,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8854,7 +8241,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8868,7 +8254,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8882,7 +8267,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8896,7 +8280,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8910,7 +8293,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8924,7 +8306,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8938,7 +8319,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8952,7 +8332,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8966,7 +8345,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8980,7 +8358,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8994,7 +8371,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9008,7 +8384,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9022,7 +8397,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9036,7 +8410,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9050,7 +8423,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9064,7 +8436,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9078,7 +8449,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9092,7 +8462,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9106,7 +8475,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9120,7 +8488,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9134,7 +8501,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9148,7 +8514,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9162,7 +8527,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9176,7 +8540,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9190,7 +8553,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9204,7 +8566,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9218,7 +8579,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9232,7 +8592,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9246,7 +8605,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9260,7 +8618,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9274,7 +8631,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9288,7 +8644,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9302,7 +8657,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x107F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9316,7 +8670,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9330,7 +8683,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9344,7 +8696,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9358,7 +8709,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9372,7 +8722,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9386,7 +8735,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9400,7 +8748,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9414,7 +8761,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C07F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9428,7 +8774,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F800807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9442,7 +8787,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9456,7 +8800,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9470,7 +8813,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9484,7 +8826,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9498,7 +8839,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9512,7 +8852,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800807F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9526,7 +8865,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F802007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9540,7 +8878,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9554,7 +8891,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9568,7 +8904,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9582,7 +8917,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9596,7 +8930,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2002007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9610,7 +8943,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x802007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9624,7 +8956,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F800407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9638,7 +8969,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9652,7 +8982,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9666,7 +8995,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9680,7 +9008,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9694,7 +9021,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9708,7 +9034,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800407F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9722,7 +9047,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F801007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9736,7 +9060,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9750,7 +9073,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9764,7 +9086,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9778,7 +9099,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9792,7 +9112,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9806,7 +9125,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9820,7 +9138,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F804007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9834,7 +9151,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9848,7 +9164,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9862,7 +9177,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F800207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9876,7 +9190,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9890,7 +9203,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9904,7 +9216,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9918,7 +9229,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9932,7 +9242,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9946,7 +9255,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9960,7 +9268,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9974,7 +9281,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9988,7 +9294,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10002,7 +9307,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10016,7 +9320,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10030,7 +9333,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10044,7 +9346,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10058,7 +9359,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10072,7 +9372,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10086,7 +9385,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10100,7 +9398,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10114,7 +9411,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10128,7 +9424,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10142,7 +9437,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10156,7 +9450,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10170,7 +9463,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10184,7 +9476,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10198,7 +9489,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10212,7 +9502,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10226,7 +9515,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10240,7 +9528,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10254,7 +9541,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10268,7 +9554,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10282,7 +9567,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10296,7 +9580,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10310,7 +9593,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10324,7 +9606,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10338,7 +9619,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10352,7 +9632,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10366,7 +9645,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10380,7 +9658,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10394,7 +9671,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10408,7 +9684,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10422,7 +9697,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10436,7 +9710,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10450,7 +9723,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10464,7 +9736,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10478,7 +9749,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10492,7 +9762,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10506,7 +9775,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10520,7 +9788,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10534,7 +9801,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10548,7 +9814,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10562,7 +9827,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10576,7 +9840,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10590,7 +9853,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10604,7 +9866,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10618,7 +9879,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10632,7 +9892,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10646,7 +9905,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10660,7 +9918,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10674,7 +9931,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10688,7 +9944,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10702,7 +9957,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10716,7 +9970,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10730,7 +9983,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10744,7 +9996,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10758,7 +10009,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10772,7 +10022,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10786,7 +10035,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10800,7 +10048,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10814,7 +10061,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10828,7 +10074,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10842,7 +10087,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10856,7 +10100,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10870,7 +10113,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10884,7 +10126,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10898,7 +10139,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10912,7 +10152,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10926,7 +10165,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10940,7 +10178,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10954,7 +10191,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10968,7 +10204,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10982,7 +10217,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -10996,7 +10230,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11010,7 +10243,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11024,7 +10256,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11038,7 +10269,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11052,7 +10282,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11066,7 +10295,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11080,7 +10308,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11094,7 +10321,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11108,7 +10334,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11122,7 +10347,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11136,7 +10360,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11150,7 +10373,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11164,7 +10386,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11178,7 +10399,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11192,7 +10412,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11206,7 +10425,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11220,7 +10438,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11234,7 +10451,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11248,7 +10464,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11262,7 +10477,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11276,7 +10490,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11290,7 +10503,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11304,7 +10516,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11318,7 +10529,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11332,7 +10542,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11346,7 +10555,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11360,7 +10568,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11374,7 +10581,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11388,7 +10594,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11402,7 +10607,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11416,7 +10620,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11430,7 +10633,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11444,7 +10646,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11458,7 +10659,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11472,7 +10672,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11486,7 +10685,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11500,7 +10698,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11514,7 +10711,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11528,7 +10724,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11542,7 +10737,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11556,7 +10750,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11570,7 +10763,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11584,7 +10776,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11598,7 +10789,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11612,7 +10802,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11626,7 +10815,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11640,7 +10828,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11654,7 +10841,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11668,7 +10854,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11682,7 +10867,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11696,7 +10880,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11710,7 +10893,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11724,7 +10906,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11738,7 +10919,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11752,7 +10932,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11766,7 +10945,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11780,7 +10958,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11794,7 +10971,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11808,7 +10984,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11822,7 +10997,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11836,7 +11010,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11850,7 +11023,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11864,7 +11036,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11878,7 +11049,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11892,7 +11062,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11906,7 +11075,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11920,7 +11088,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11934,7 +11101,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11948,7 +11114,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11962,7 +11127,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11976,7 +11140,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -11990,7 +11153,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12004,7 +11166,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12018,7 +11179,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12032,7 +11192,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12046,7 +11205,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12060,7 +11218,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12074,7 +11231,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12088,7 +11244,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12102,7 +11257,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12116,7 +11270,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12130,7 +11283,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12144,7 +11296,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12158,7 +11309,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12172,7 +11322,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12186,7 +11335,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12200,7 +11348,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12214,7 +11361,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12228,7 +11374,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12242,7 +11387,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12256,7 +11400,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12270,7 +11413,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12284,7 +11426,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12298,7 +11439,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12312,7 +11452,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12326,7 +11465,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12340,7 +11478,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12354,7 +11491,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12368,7 +11504,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12382,7 +11517,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12396,7 +11530,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12410,7 +11543,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12424,7 +11556,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12438,7 +11569,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12452,7 +11582,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12466,7 +11595,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12480,7 +11608,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12494,7 +11621,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12508,7 +11634,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12522,7 +11647,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12536,7 +11660,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12550,7 +11673,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12564,7 +11686,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12578,7 +11699,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12592,7 +11712,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x18000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12606,7 +11725,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12620,7 +11738,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12634,7 +11751,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12648,7 +11764,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12662,7 +11777,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12676,7 +11790,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12690,7 +11803,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12704,7 +11816,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12718,7 +11829,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12732,7 +11842,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12746,7 +11855,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12760,7 +11868,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12774,7 +11881,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12788,7 +11894,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12802,7 +11907,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12816,7 +11920,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12830,7 +11933,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12844,7 +11946,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12858,7 +11959,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12872,7 +11972,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12886,7 +11985,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12900,7 +11998,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80208000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12914,7 +12011,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12928,7 +12024,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12942,7 +12037,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12956,7 +12050,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12970,7 +12063,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12984,7 +12076,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -12998,7 +12089,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13012,7 +12102,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13026,7 +12115,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13040,7 +12128,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13054,7 +12141,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13068,7 +12154,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13082,7 +12167,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13096,7 +12180,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13110,7 +12193,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13124,7 +12206,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13138,7 +12219,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13152,7 +12232,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13166,7 +12245,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13180,7 +12258,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13194,7 +12271,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13208,7 +12284,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13222,7 +12297,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13236,7 +12310,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13250,7 +12323,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13264,7 +12336,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13278,7 +12349,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13292,7 +12362,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13306,7 +12375,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13320,7 +12388,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13334,7 +12401,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13348,7 +12414,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13362,7 +12427,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13376,7 +12440,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13390,7 +12453,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13404,7 +12466,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13418,7 +12479,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13432,7 +12492,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13446,7 +12505,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13460,7 +12518,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13474,7 +12531,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13488,7 +12544,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13502,7 +12557,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13516,7 +12570,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13530,7 +12583,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13544,7 +12596,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13558,7 +12609,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13572,7 +12622,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13586,7 +12635,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13600,7 +12648,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13614,7 +12661,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13628,7 +12674,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13642,7 +12687,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13656,7 +12700,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13670,7 +12713,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13684,7 +12726,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13698,7 +12739,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13712,7 +12752,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13726,7 +12765,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13740,7 +12778,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13754,7 +12791,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13768,7 +12804,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13782,7 +12817,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13796,7 +12830,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13810,7 +12843,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13824,7 +12856,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13838,7 +12869,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13852,7 +12882,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13866,7 +12895,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13880,7 +12908,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13894,7 +12921,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13908,7 +12934,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13922,7 +12947,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13936,7 +12960,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13950,7 +12973,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13964,7 +12986,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13978,7 +12999,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -13992,7 +13012,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14006,7 +13025,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14020,7 +13038,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14034,7 +13051,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14048,7 +13064,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14062,7 +13077,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14076,7 +13090,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14090,7 +13103,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14104,7 +13116,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14118,7 +13129,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14132,7 +13142,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14146,7 +13155,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14160,7 +13168,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14174,7 +13181,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14188,7 +13194,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14202,7 +13207,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14216,7 +13220,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14230,7 +13233,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14244,7 +13246,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14258,7 +13259,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14272,7 +13272,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14286,7 +13285,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14300,7 +13298,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14314,7 +13311,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14328,7 +13324,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14342,7 +13337,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14356,7 +13350,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14370,7 +13363,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14384,7 +13376,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14398,7 +13389,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14412,7 +13402,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14426,7 +13415,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14440,7 +13428,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14454,7 +13441,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14468,7 +13454,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14482,7 +13467,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14496,7 +13480,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14510,7 +13493,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14524,7 +13506,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14538,7 +13519,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14552,7 +13532,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14566,7 +13545,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14580,7 +13558,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14594,7 +13571,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14608,7 +13584,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14622,7 +13597,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14636,7 +13610,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14650,7 +13623,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14664,7 +13636,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14678,7 +13649,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14692,7 +13662,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14706,7 +13675,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14720,7 +13688,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14734,7 +13701,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14748,7 +13714,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14762,7 +13727,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14776,7 +13740,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14790,7 +13753,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14804,7 +13766,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14818,7 +13779,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14832,7 +13792,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14846,7 +13805,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14860,7 +13818,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14874,7 +13831,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14888,7 +13844,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14902,7 +13857,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14916,7 +13870,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14930,7 +13883,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14944,7 +13896,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14958,7 +13909,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14972,7 +13922,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -14986,7 +13935,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15000,7 +13948,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15014,7 +13961,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15028,7 +13974,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15042,7 +13987,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15056,7 +14000,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15070,7 +14013,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15084,7 +14026,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15098,7 +14039,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15112,7 +14052,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15126,7 +14065,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15140,7 +14078,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15154,7 +14091,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15168,7 +14104,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15182,7 +14117,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15196,7 +14130,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15210,7 +14143,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15224,7 +14156,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15238,7 +14169,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15252,7 +14182,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15266,7 +14195,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15280,7 +14208,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15294,7 +14221,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15308,7 +14234,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15322,7 +14247,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15336,7 +14260,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15350,7 +14273,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15364,7 +14286,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15378,7 +14299,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15392,7 +14312,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15406,7 +14325,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15420,7 +14338,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15434,7 +14351,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15448,7 +14364,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15462,7 +14377,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15476,7 +14390,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15490,7 +14403,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15504,7 +14416,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15518,7 +14429,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15532,7 +14442,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15546,7 +14455,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15560,7 +14468,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15574,7 +14481,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15588,7 +14494,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15602,7 +14507,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15616,7 +14520,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15630,7 +14533,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15644,7 +14546,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15658,7 +14559,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15672,7 +14572,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15686,7 +14585,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15700,7 +14598,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15714,7 +14611,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15728,7 +14624,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15742,7 +14637,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15756,7 +14650,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15770,7 +14663,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15784,7 +14676,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15798,7 +14689,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15812,7 +14702,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15826,7 +14715,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15840,7 +14728,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15854,7 +14741,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15868,7 +14754,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15882,7 +14767,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15896,7 +14780,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15910,7 +14793,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15924,7 +14806,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15938,7 +14819,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15952,7 +14832,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15966,7 +14845,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8007C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15980,7 +14858,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -15994,7 +14871,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x803C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16008,7 +14884,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16022,7 +14897,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16036,7 +14910,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16050,7 +14923,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16064,7 +14936,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16078,7 +14949,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16092,7 +14962,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16106,7 +14975,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16120,7 +14988,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16134,7 +15001,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16148,7 +15014,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16162,7 +15027,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16176,7 +15040,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16190,7 +15053,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80200100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16204,7 +15066,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16218,7 +15079,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16232,7 +15092,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16246,7 +15105,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16260,7 +15118,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16274,7 +15131,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16288,7 +15144,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16302,7 +15157,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16316,7 +15170,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16330,7 +15183,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16344,7 +15196,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16358,7 +15209,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16372,7 +15222,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16386,7 +15235,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16400,7 +15248,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16414,7 +15261,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16428,7 +15274,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16442,7 +15287,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16456,7 +15300,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16470,7 +15313,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16484,7 +15326,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16498,7 +15339,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16512,7 +15352,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16526,7 +15365,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -16576,4 +15414,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
index ae55c35c2f19..36042010d768 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
@@ -236,7 +236,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -249,7 +248,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -262,7 +260,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -275,7 +272,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -288,7 +284,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -301,7 +296,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -314,7 +308,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -327,7 +320,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -340,7 +332,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -353,7 +344,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -366,7 +356,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -379,7 +368,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -392,7 +380,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -405,7 +392,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -418,7 +404,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -431,7 +416,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -444,7 +428,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -457,7 +440,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -470,7 +452,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -483,7 +464,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -496,7 +476,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -509,7 +488,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -522,7 +500,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -535,7 +512,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -548,7 +524,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -561,7 +536,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -574,7 +548,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -587,7 +560,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -600,7 +572,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -613,7 +584,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -626,7 +596,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -639,7 +608,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -652,7 +620,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -665,7 +632,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -678,7 +644,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -691,7 +656,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -704,7 +668,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -717,7 +680,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -730,7 +692,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -743,7 +704,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -756,7 +716,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -769,7 +728,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -782,7 +740,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -795,7 +752,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -808,7 +764,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -821,7 +776,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -834,7 +788,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -847,7 +800,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -860,7 +812,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -873,7 +824,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -886,7 +836,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -899,7 +848,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -912,7 +860,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -925,7 +872,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -938,7 +884,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -951,7 +896,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -964,7 +908,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -977,7 +920,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -990,7 +932,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1003,7 +944,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1016,7 +956,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1029,7 +968,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1042,7 +980,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1055,7 +992,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1068,7 +1004,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1081,7 +1016,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1094,7 +1028,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1107,7 +1040,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1120,7 +1052,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1133,7 +1064,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1146,7 +1076,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1159,7 +1088,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1172,7 +1100,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1185,7 +1112,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1198,7 +1124,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1211,7 +1136,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1224,7 +1148,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1237,7 +1160,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1250,7 +1172,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1263,7 +1184,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1276,7 +1196,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1289,7 +1208,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1302,7 +1220,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1315,7 +1232,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1328,7 +1244,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F840007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1341,7 +1256,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1354,7 +1268,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1367,7 +1280,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1380,7 +1292,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1393,7 +1304,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1406,7 +1316,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x6040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1419,7 +1328,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x840007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1432,7 +1340,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B8007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1445,7 +1352,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F900007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1458,7 +1364,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1471,7 +1376,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1484,7 +1388,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1497,7 +1400,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1510,7 +1412,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1523,7 +1424,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x900007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1536,7 +1436,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1549,7 +1448,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1562,7 +1460,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1575,7 +1472,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1588,7 +1484,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1601,7 +1496,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1614,7 +1508,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1627,7 +1520,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1640,7 +1532,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1653,7 +1544,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1666,7 +1556,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1679,7 +1568,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1692,7 +1580,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1705,7 +1592,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1718,7 +1604,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1731,7 +1616,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1744,7 +1628,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1757,7 +1640,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1770,7 +1652,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1783,7 +1664,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1796,7 +1676,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1809,7 +1688,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1822,7 +1700,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1835,7 +1712,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1848,7 +1724,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1861,7 +1736,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1874,7 +1748,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1887,7 +1760,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1900,7 +1772,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1913,7 +1784,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1926,7 +1796,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1939,7 +1808,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1952,7 +1820,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1965,7 +1832,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1978,7 +1844,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1991,7 +1856,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2004,7 +1868,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2017,7 +1880,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2030,7 +1892,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2043,7 +1904,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2056,7 +1916,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2069,7 +1928,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2082,7 +1940,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2095,7 +1952,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2108,7 +1964,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2121,7 +1976,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2134,7 +1988,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2147,7 +2000,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2160,7 +2012,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2173,7 +2024,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2186,7 +2036,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2199,7 +2048,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2212,7 +2060,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2225,7 +2072,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2238,7 +2084,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2251,7 +2096,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2264,7 +2108,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2277,7 +2120,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2290,7 +2132,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2303,7 +2144,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2316,7 +2156,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2329,7 +2168,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2342,7 +2180,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2355,7 +2192,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2368,7 +2204,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2381,7 +2216,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2394,7 +2228,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2407,7 +2240,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2420,7 +2252,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2433,7 +2264,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2446,7 +2276,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2459,7 +2288,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2472,7 +2300,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2485,7 +2312,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2498,7 +2324,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2511,7 +2336,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2524,7 +2348,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2537,7 +2360,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2550,7 +2372,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2563,7 +2384,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2576,7 +2396,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2589,7 +2408,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2602,7 +2420,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2615,7 +2432,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2628,7 +2444,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2641,7 +2456,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2654,7 +2468,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2667,7 +2480,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2680,7 +2492,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2693,7 +2504,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2706,7 +2516,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2719,7 +2528,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2732,7 +2540,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2745,7 +2552,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2758,7 +2564,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2771,7 +2576,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2784,7 +2588,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2797,7 +2600,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2810,7 +2612,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2823,7 +2624,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2836,7 +2636,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2849,7 +2648,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2862,7 +2660,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2875,7 +2672,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2888,7 +2684,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2901,7 +2696,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC08000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2914,7 +2708,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC08000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2927,7 +2720,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2940,7 +2732,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2953,7 +2744,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2966,7 +2756,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2979,7 +2768,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2992,7 +2780,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3005,7 +2792,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3018,7 +2804,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3031,7 +2816,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3044,7 +2828,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3057,7 +2840,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B808000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3070,7 +2852,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3083,7 +2864,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3096,7 +2876,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3109,7 +2888,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3122,7 +2900,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3135,7 +2912,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3148,7 +2924,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3161,7 +2936,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3174,7 +2948,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3187,7 +2960,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3200,7 +2972,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3213,7 +2984,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3226,7 +2996,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3239,7 +3008,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3252,7 +3020,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3265,7 +3032,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3278,7 +3044,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3291,7 +3056,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3304,7 +3068,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3317,7 +3080,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3330,7 +3092,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3343,7 +3104,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3356,7 +3116,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3369,7 +3128,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3382,7 +3140,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3395,7 +3152,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3408,7 +3164,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3421,7 +3176,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3434,7 +3188,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3447,7 +3200,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3460,7 +3212,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3473,7 +3224,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3486,7 +3236,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3499,7 +3248,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3512,7 +3260,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3525,7 +3272,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3538,7 +3284,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3551,7 +3296,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3564,7 +3308,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3577,7 +3320,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3590,7 +3332,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3603,7 +3344,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3616,7 +3356,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3629,7 +3368,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3642,7 +3380,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3655,7 +3392,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3668,7 +3404,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3681,7 +3416,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3694,7 +3428,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3707,7 +3440,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3720,7 +3452,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3733,7 +3464,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3746,7 +3476,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3759,7 +3488,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3772,7 +3500,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3785,7 +3512,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3798,7 +3524,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3811,7 +3536,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3824,7 +3548,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3837,7 +3560,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3850,7 +3572,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3863,7 +3584,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3876,7 +3596,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3889,7 +3608,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3902,7 +3620,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3915,7 +3632,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3928,7 +3644,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3941,7 +3656,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3954,7 +3668,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3967,7 +3680,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3980,7 +3692,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -3993,7 +3704,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4006,7 +3716,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4019,7 +3728,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4032,7 +3740,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4045,7 +3752,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4058,7 +3764,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4071,7 +3776,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4084,7 +3788,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4097,7 +3800,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4110,7 +3812,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4123,7 +3824,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4136,7 +3836,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4149,7 +3848,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4162,7 +3860,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4175,7 +3872,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4188,7 +3884,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4201,7 +3896,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4214,7 +3908,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4227,7 +3920,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4240,7 +3932,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4253,7 +3944,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4266,7 +3956,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4279,7 +3968,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4292,7 +3980,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4305,7 +3992,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4318,7 +4004,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4331,7 +4016,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4344,7 +4028,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4357,7 +4040,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4370,7 +4052,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4383,7 +4064,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4396,7 +4076,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4409,7 +4088,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4422,7 +4100,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4435,7 +4112,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4448,7 +4124,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4461,7 +4136,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4474,7 +4148,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4487,7 +4160,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4500,7 +4172,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4513,7 +4184,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4526,7 +4196,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4539,7 +4208,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4552,7 +4220,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4565,7 +4232,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4578,7 +4244,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4591,7 +4256,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4604,7 +4268,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4617,7 +4280,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4630,7 +4292,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4643,7 +4304,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4656,7 +4316,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4669,7 +4328,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4682,7 +4340,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4695,7 +4352,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4708,7 +4364,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4721,7 +4376,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4734,7 +4388,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4747,7 +4400,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4760,7 +4412,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4773,7 +4424,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4826,7 +4476,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4840,7 +4489,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4854,7 +4502,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4868,7 +4515,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4882,7 +4528,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4896,7 +4541,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4910,7 +4554,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4924,7 +4567,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4938,7 +4580,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4952,7 +4593,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4966,7 +4606,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4980,7 +4619,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -4994,7 +4632,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5008,7 +4645,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5022,7 +4658,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5036,7 +4671,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5050,7 +4684,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5064,7 +4697,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5078,7 +4710,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5092,7 +4723,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5106,7 +4736,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5120,7 +4749,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5134,7 +4762,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5148,7 +4775,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5162,7 +4788,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5176,7 +4801,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5190,7 +4814,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5204,7 +4827,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5218,7 +4840,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5232,7 +4853,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5246,7 +4866,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5260,7 +4879,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5274,7 +4892,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5288,7 +4905,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5302,7 +4918,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5316,7 +4931,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5330,7 +4944,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5344,7 +4957,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5358,7 +4970,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5372,7 +4983,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5386,7 +4996,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5400,7 +5009,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5414,7 +5022,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5428,7 +5035,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5442,7 +5048,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5456,7 +5061,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5470,7 +5074,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5484,7 +5087,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5498,7 +5100,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5512,7 +5113,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5526,7 +5126,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5540,7 +5139,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5554,7 +5152,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5568,7 +5165,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5582,7 +5178,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5596,7 +5191,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5610,7 +5204,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5624,7 +5217,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5638,7 +5230,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5652,7 +5243,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5666,7 +5256,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5680,7 +5269,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5694,7 +5282,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5708,7 +5295,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5722,7 +5308,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5736,7 +5321,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5750,7 +5334,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5764,7 +5347,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5778,7 +5360,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5792,7 +5373,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5806,7 +5386,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5820,7 +5399,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5834,7 +5412,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5848,7 +5425,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5862,7 +5438,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5876,7 +5451,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5890,7 +5464,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5904,7 +5477,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5918,7 +5490,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5932,7 +5503,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5946,7 +5516,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5960,7 +5529,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5974,7 +5542,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -5988,7 +5555,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC0007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6002,7 +5568,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F840007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6016,7 +5581,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6030,7 +5594,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6044,7 +5607,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6058,7 +5620,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6072,7 +5633,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6086,7 +5646,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x6040007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6100,7 +5659,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x840007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6114,7 +5672,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B8007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6128,7 +5685,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F900007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6142,7 +5698,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6156,7 +5711,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6170,7 +5724,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6184,7 +5737,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6198,7 +5750,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2100007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6212,7 +5763,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x900007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6226,7 +5776,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6240,7 +5789,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6254,7 +5802,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6268,7 +5815,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6282,7 +5828,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6296,7 +5841,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6310,7 +5854,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6324,7 +5867,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6338,7 +5880,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6352,7 +5893,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6366,7 +5906,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6380,7 +5919,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6394,7 +5932,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6408,7 +5945,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6422,7 +5958,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6436,7 +5971,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6450,7 +5984,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6464,7 +5997,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6478,7 +6010,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6492,7 +6023,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6506,7 +6036,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6520,7 +6049,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6534,7 +6062,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6548,7 +6075,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6562,7 +6088,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6576,7 +6101,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6590,7 +6114,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6604,7 +6127,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6618,7 +6140,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6632,7 +6153,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6646,7 +6166,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6660,7 +6179,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6674,7 +6192,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6688,7 +6205,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6702,7 +6218,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6716,7 +6231,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6730,7 +6244,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6744,7 +6257,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6758,7 +6270,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6772,7 +6283,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6786,7 +6296,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6800,7 +6309,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6814,7 +6322,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6828,7 +6335,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6842,7 +6348,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6856,7 +6361,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6870,7 +6374,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6884,7 +6387,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6898,7 +6400,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6912,7 +6413,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6926,7 +6426,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6940,7 +6439,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6954,7 +6452,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6968,7 +6465,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6982,7 +6478,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -6996,7 +6491,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7010,7 +6504,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7024,7 +6517,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7038,7 +6530,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7052,7 +6543,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7066,7 +6556,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7080,7 +6569,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7094,7 +6582,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7108,7 +6595,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7122,7 +6608,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7136,7 +6621,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7150,7 +6634,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7164,7 +6647,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7178,7 +6660,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7192,7 +6673,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7206,7 +6686,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7220,7 +6699,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7234,7 +6712,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7248,7 +6725,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7262,7 +6738,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7276,7 +6751,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7290,7 +6764,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7304,7 +6777,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7318,7 +6790,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7332,7 +6803,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7346,7 +6816,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7360,7 +6829,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7374,7 +6842,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7388,7 +6855,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7402,7 +6868,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7416,7 +6881,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7430,7 +6894,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7444,7 +6907,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7458,7 +6920,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7472,7 +6933,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7486,7 +6946,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7500,7 +6959,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7514,7 +6972,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7528,7 +6985,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7542,7 +6998,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7556,7 +7011,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7570,7 +7024,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7584,7 +7037,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7598,7 +7050,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7612,7 +7063,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7626,7 +7076,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7640,7 +7089,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7654,7 +7102,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7668,7 +7115,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7682,7 +7128,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7696,7 +7141,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC08000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7710,7 +7154,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC08000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7724,7 +7167,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7738,7 +7180,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7752,7 +7193,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7766,7 +7206,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7780,7 +7219,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7794,7 +7232,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7808,7 +7245,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7822,7 +7258,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7836,7 +7271,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7850,7 +7284,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7864,7 +7297,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B808000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7878,7 +7310,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7892,7 +7323,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7906,7 +7336,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7920,7 +7349,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7934,7 +7362,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7948,7 +7375,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7962,7 +7388,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7976,7 +7401,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -7990,7 +7414,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8004,7 +7427,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8018,7 +7440,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8032,7 +7453,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8046,7 +7466,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8060,7 +7479,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8074,7 +7492,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8088,7 +7505,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8102,7 +7518,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8116,7 +7531,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8130,7 +7544,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8144,7 +7557,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8158,7 +7570,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8172,7 +7583,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8186,7 +7596,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8200,7 +7609,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8214,7 +7622,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8228,7 +7635,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8242,7 +7648,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8256,7 +7661,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8270,7 +7674,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8284,7 +7687,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8298,7 +7700,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8312,7 +7713,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8326,7 +7726,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8340,7 +7739,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8354,7 +7752,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8368,7 +7765,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8382,7 +7778,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8396,7 +7791,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8410,7 +7804,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8424,7 +7817,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8438,7 +7830,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8452,7 +7843,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8466,7 +7856,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8480,7 +7869,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8494,7 +7882,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8508,7 +7895,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8522,7 +7908,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8536,7 +7921,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8550,7 +7934,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8564,7 +7947,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8578,7 +7960,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8592,7 +7973,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8606,7 +7986,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8620,7 +7999,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8634,7 +8012,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8648,7 +8025,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8662,7 +8038,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8676,7 +8051,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8690,7 +8064,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8704,7 +8077,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8718,7 +8090,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8732,7 +8103,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8746,7 +8116,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8760,7 +8129,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8774,7 +8142,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8788,7 +8155,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8802,7 +8168,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8816,7 +8181,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8830,7 +8194,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8844,7 +8207,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8858,7 +8220,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8872,7 +8233,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8886,7 +8246,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8900,7 +8259,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8914,7 +8272,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8928,7 +8285,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8942,7 +8298,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8956,7 +8311,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8970,7 +8324,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8984,7 +8337,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -8998,7 +8350,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9012,7 +8363,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9026,7 +8376,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9040,7 +8389,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9054,7 +8402,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9068,7 +8415,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9082,7 +8428,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9096,7 +8441,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9110,7 +8454,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9124,7 +8467,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9138,7 +8480,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9152,7 +8493,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9166,7 +8506,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9180,7 +8519,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9194,7 +8532,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9208,7 +8545,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9222,7 +8558,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9236,7 +8571,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9250,7 +8584,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9264,7 +8597,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9278,7 +8610,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9292,7 +8623,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9306,7 +8636,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9320,7 +8649,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9334,7 +8662,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9348,7 +8675,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9362,7 +8688,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9376,7 +8701,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9390,7 +8714,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9404,7 +8727,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9418,7 +8740,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9432,7 +8753,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9446,7 +8766,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9460,7 +8779,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9474,7 +8792,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9488,7 +8805,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9502,7 +8818,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9516,7 +8831,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9530,7 +8844,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9544,7 +8857,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9558,7 +8870,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9572,7 +8883,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9586,7 +8896,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9600,7 +8909,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9614,7 +8922,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9628,7 +8935,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9642,7 +8948,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9656,7 +8961,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x810000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9670,7 +8974,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x410000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9684,7 +8987,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x110000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9698,7 +9000,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x210000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9712,7 +9013,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -9914,4 +9214,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-]
\ No newline at end of file
+]
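
(Editor's note, not part of the patch: the hunks above and below all make the same mechanical change — they drop a "PublicDescription" string duplicated verbatim across every OFFCORE_RESPONSE event entry, leaving the per-event fields such as "EventCode", "MSRIndex" 0x1a6/0x1a7, "MSRValue", and "UMask" untouched. A minimal Python sketch of how such a cleanup could be done mechanically, assuming each pmu-events file is a JSON array of event objects, as the closing "]" above suggests; the script name and the exact boilerplate prefix matched here are illustrative, not taken from this patch:)

```python
#!/usr/bin/env python3
# Illustrative sketch (not necessarily the tool used for this patch):
# strip a duplicated "PublicDescription" boilerplate string from perf
# pmu-events JSON files, keeping all other event fields intact.
import json
import sys

# Assumption: the boilerplate always starts with this prefix, as seen
# in the removed lines of the diff above.
BOILERPLATE_PREFIX = (
    "Offcore response can be programmed only with a specific pair"
)

def strip_boilerplate(path: str) -> None:
    with open(path, encoding="utf-8") as f:
        events = json.load(f)  # each file is a JSON array of event dicts

    for event in events:
        desc = event.get("PublicDescription", "")
        if desc.startswith(BOILERPLATE_PREFIX):
            # Drop only the duplicated boilerplate description.
            del event["PublicDescription"]

    with open(path, "w", encoding="utf-8") as f:
        json.dump(events, f, indent=4, sort_keys=True)
        # Ensure a trailing newline, matching the end-of-file fix above.
        f.write("\n")

if __name__ == "__main__":
    for p in sys.argv[1:]:
        strip_boilerplate(p)
```

(The same note applies to the regenerated file ending: the old file lacked a trailing newline, and the rewritten file adds one, which is why the diff ends with a "\ No newline at end of file" marker on the removed side only.)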
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
index bb23a91b0127..60d8a99813b9 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
@@ -78,7 +78,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -91,7 +90,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -104,7 +102,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -117,7 +114,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -130,7 +126,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -143,7 +138,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -156,7 +150,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -169,7 +162,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -182,7 +174,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -195,7 +186,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -208,7 +198,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -221,7 +210,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -234,7 +222,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -247,7 +234,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -260,7 +246,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -273,7 +258,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -286,7 +270,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -299,7 +282,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -312,7 +294,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -325,7 +306,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -338,7 +318,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -351,7 +330,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -364,7 +342,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -377,7 +354,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -390,7 +366,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -403,7 +378,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -416,7 +390,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -429,7 +402,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -442,7 +414,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -455,7 +426,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -468,7 +438,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -481,7 +450,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -494,7 +462,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -507,7 +474,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x107F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -520,7 +486,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F804007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -533,7 +498,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x804007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -546,7 +510,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004007F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -559,7 +522,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F800207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -572,7 +534,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -585,7 +546,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -598,7 +558,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -611,7 +570,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -624,7 +582,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -637,7 +594,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800207F7",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -650,7 +606,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -663,7 +618,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -676,7 +630,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -689,7 +642,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -702,7 +654,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -715,7 +666,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -728,7 +678,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -741,7 +690,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -754,7 +702,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -767,7 +714,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -780,7 +726,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -793,7 +738,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -806,7 +750,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -819,7 +762,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -832,7 +774,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -845,7 +786,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -858,7 +798,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -871,7 +810,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -884,7 +822,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -897,7 +834,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -910,7 +846,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -923,7 +858,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -936,7 +870,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -949,7 +882,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -962,7 +894,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -975,7 +906,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -988,7 +918,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1001,7 +930,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1014,7 +942,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1027,7 +954,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1040,7 +966,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1053,7 +978,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1066,7 +990,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1079,7 +1002,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1092,7 +1014,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1105,7 +1026,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1118,7 +1038,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1131,7 +1050,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1144,7 +1062,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1157,7 +1074,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1170,7 +1086,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1183,7 +1098,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1196,7 +1110,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1209,7 +1122,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1222,7 +1134,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x18000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1235,7 +1146,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1248,7 +1158,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1261,7 +1170,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1274,7 +1182,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1287,7 +1194,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1300,7 +1206,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1313,7 +1218,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1326,7 +1230,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1339,7 +1242,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1352,7 +1254,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1365,7 +1266,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1378,7 +1278,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1391,7 +1290,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1404,7 +1302,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1417,7 +1314,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1430,7 +1326,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1443,7 +1338,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1456,7 +1350,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1469,7 +1362,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1482,7 +1374,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1495,7 +1386,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1508,7 +1398,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1521,7 +1410,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1534,7 +1422,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1547,7 +1434,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1560,7 +1446,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1573,7 +1458,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1586,7 +1470,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1599,7 +1482,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1612,7 +1494,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1625,7 +1506,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1638,7 +1518,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1651,7 +1530,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1664,7 +1542,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1677,7 +1554,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1690,7 +1566,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1703,7 +1578,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1716,7 +1590,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1729,7 +1602,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1742,7 +1614,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1755,7 +1626,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1768,7 +1638,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1781,7 +1650,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1794,7 +1662,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1807,7 +1674,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1820,7 +1686,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1833,7 +1698,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1846,7 +1710,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1859,7 +1722,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1872,7 +1734,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1885,7 +1746,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1898,7 +1758,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1911,7 +1770,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1924,7 +1782,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1937,7 +1794,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1950,7 +1806,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1963,7 +1818,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1976,7 +1830,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1989,7 +1842,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2002,7 +1854,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2015,7 +1866,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x800020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2028,7 +1878,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2041,7 +1890,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2054,7 +1902,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2067,8 +1914,7 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
index 12eabae3e224..79fda10ec4bb 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -417,6 +417,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
"CounterHTOff": "Fixed counter 0",
@@ -969,7 +979,7 @@
"BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "10",
+ "CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"Invert": "1",
@@ -977,4 +987,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
index 2600fd8d7a54..a416515d41da 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -10,6 +10,16 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "read requests to memory controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -20,6 +30,16 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "write requests to memory controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Memory controller clock ticks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
@@ -90,6 +110,15 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec). Derived from unc_m_pmm_wpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE7",
@@ -99,6 +128,15 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
"Counter": "0,1,2,3",
"EventCode": "0xE3",
@@ -110,6 +148,17 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
+ "MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
"Counter": "0,1,2,3",
"EventCode": "0xE0",
@@ -131,6 +180,18 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Intel Optane DC persistent memory read latency (ns)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
+ "MetricName": "UNC_M_PMM_READ_LATENCY",
+ "PerPkg": "1",
+ "ScaleUnit": "6000000000ns",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "DRAM Page Activate commands sent due to a write request",
"Counter": "0,1,2,3",
"EventCode": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
index 7f1cf4d8f0fa..aa460d0c4851 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
@@ -17,6 +17,16 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -27,6 +37,16 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "MMIO reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40040e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -37,6 +57,16 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "MMIO writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40041e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -48,6 +78,17 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Streaming stores (full cache line)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x41833",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -59,6 +100,17 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Streaming stores (partial cache line)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x41a33",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "read requests from home agent",
"Counter": "0,1,2,3",
"EventCode": "0x50",
@@ -114,6 +166,16 @@
"Unit": "UPI LL"
},
{
+ "BriefDescription": "UPI interconnect send bandwidth for payload",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI LL"
+ },
+ {
"BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
"Counter": "0,1",
"EventCode": "0x83",
@@ -177,6 +239,21 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "PCI Express bandwidth writing at IIO",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
"Counter": "0,1",
"EventCode": "0x83",
@@ -240,6 +317,21 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "PCI Express bandwidth reading at IIO",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
"Counter": "0,1,2,3",
"EventCode": "0x33",
@@ -514,7 +606,7 @@
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to it's home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to it's home socket to be written back to memory.",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to its home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to its home socket to be written back to memory.",
"UMask": "0x20",
"Unit": "CHA"
},
@@ -524,7 +616,7 @@
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This reponse will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to its home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
"UMask": "0x10",
"Unit": "CHA"
},
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
index de55b199ba79..8692d4847476 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
@@ -1,16 +1,5 @@
[
{
- "BriefDescription": "Counts the total number of BTCLEARS.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3",
- "EventCode": "0xe8",
- "EventName": "BTCLEAR.ANY",
- "PDIR_COUNTER": "na",
- "PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
- "SampleAfterValue": "200003"
- },
- {
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.SELF_LOCKS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -180,4 +169,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
index 31816c6543a8..c18acb422145 100644
--- a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
@@ -154,6 +154,17 @@
"UMask": "0xfe"
},
{
+ "BriefDescription": "Counts the total number of BTCLEARS.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe8",
+ "EventName": "BTCLEAR.ANY",
+ "PDIR_COUNTER": "na",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "200003"
+ },
+ {
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 1",
@@ -516,4 +527,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/other.json b/tools/perf/pmu-events/arch/x86/goldmont/other.json
index e4605e636447..d888f67aa2ea 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/other.json
@@ -47,34 +47,5 @@
"PublicDescription": "Counts hardware interrupts received by the processor.",
"SampleAfterValue": "203",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Unfilled issue slots per cycle",
- "CollectPEBSRecord": "1",
- "Counter": "0,1,2,3",
- "EventCode": "0xCA",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
- "SampleAfterValue": "200003"
- },
- {
- "BriefDescription": "Unfilled issue slots per cycle to recover",
- "CollectPEBSRecord": "1",
- "Counter": "0,1,2,3",
- "EventCode": "0xCA",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
- "SampleAfterValue": "200003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
- "CollectPEBSRecord": "1",
- "Counter": "0,1,2,3",
- "EventCode": "0xCA",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
- "SampleAfterValue": "200003",
- "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index cb9155c3836d..5dba4313013f 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -246,6 +246,35 @@
"SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "Unfilled issue slots per cycle",
+ "CollectPEBSRecord": "1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Unfilled issue slots per cycle to recover",
+ "CollectPEBSRecord": "1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
+ "CollectPEBSRecord": "1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -379,4 +408,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
index 3378f48cb818..92586fe4538a 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
@@ -57,40 +57,5 @@
"PublicDescription": "Counts hardware interrupts received by the processor.",
"SampleAfterValue": "203",
"UMask": "0x1"
- },
- {
- "BriefDescription": "Unfilled issue slots per cycle",
- "CollectPEBSRecord": "1",
- "Counter": "0,1,2,3",
- "EventCode": "0xCA",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
- "PDIR_COUNTER": "na",
- "PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
- "SampleAfterValue": "200003"
- },
- {
- "BriefDescription": "Unfilled issue slots per cycle to recover",
- "CollectPEBSRecord": "1",
- "Counter": "0,1,2,3",
- "EventCode": "0xCA",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
- "PDIR_COUNTER": "na",
- "PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
- "SampleAfterValue": "200003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
- "CollectPEBSRecord": "1",
- "Counter": "0,1,2,3",
- "EventCode": "0xCA",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
- "PDIR_COUNTER": "na",
- "PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
- "SampleAfterValue": "200003",
- "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
index 8305e2ecf617..4d7e3129e5ac 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -291,6 +291,41 @@
"SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "Unfilled issue slots per cycle",
+ "CollectPEBSRecord": "1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
+ "PDIR_COUNTER": "na",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend either due to a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Unfilled issue slots per cycle to recover",
+ "CollectPEBSRecord": "1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "PDIR_COUNTER": "na",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g., the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window, including slots where uops were not available in the Instruction Queue.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
+ "CollectPEBSRecord": "1",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "PDIR_COUNTER": "na",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. This includes, but is not limited to, resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -456,4 +491,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
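
The two hunks above (and their Goldmont counterparts) move the three ISSUE_SLOTS_NOT_CONSUMED.* events from other.json into pipeline.json, where backend-stall events belong. A quick way to confirm the move landed cleanly is to check that each event name now appears in pipeline.json and no longer in other.json; the sketch below does that in Python (the paths assume a kernel-tree working directory, and the helper is a hypothetical check, not part of the perf tooling):

import json

# Hypothetical consistency check, assuming a kernel-tree working directory:
# after this patch each ISSUE_SLOTS_NOT_CONSUMED event should be defined in
# pipeline.json and no longer in other.json.
MOVED = {
    "ISSUE_SLOTS_NOT_CONSUMED.ANY",
    "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
    "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
}

def event_names(path):
    # Each pmu-events file is a JSON array of event objects.
    with open(path) as f:
        return {e["EventName"] for e in json.load(f) if "EventName" in e}

for arch in ("goldmont", "goldmontplus"):
    base = f"tools/perf/pmu-events/arch/x86/{arch}"
    assert MOVED <= event_names(f"{base}/pipeline.json")
    assert not MOVED & event_names(f"{base}/other.json")
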
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index 375ce490833c..9989f3338f0a 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -563,7 +563,6 @@
"MSRValue": "0x3FC03C0004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -578,7 +577,6 @@
"MSRValue": "0x10003C0004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -593,7 +591,6 @@
"MSRValue": "0x4003C0004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -608,7 +605,6 @@
"MSRValue": "0x2003C0004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -623,7 +619,6 @@
"MSRValue": "0x1003C0004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -638,7 +633,6 @@
"MSRValue": "0x1E003C0004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -653,7 +647,6 @@
"MSRValue": "0x3FC03C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -668,7 +661,6 @@
"MSRValue": "0x10003C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -683,7 +675,6 @@
"MSRValue": "0x4003C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -698,7 +689,6 @@
"MSRValue": "0x2003C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -713,7 +703,6 @@
"MSRValue": "0x1003C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -728,7 +717,6 @@
"MSRValue": "0x1E003C0001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -743,7 +731,6 @@
"MSRValue": "0x3FC03C0002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -758,7 +745,6 @@
"MSRValue": "0x10003C0002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -773,7 +759,6 @@
"MSRValue": "0x4003C0002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -788,7 +773,6 @@
"MSRValue": "0x2003C0002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -803,7 +787,6 @@
"MSRValue": "0x1003C0002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -818,7 +801,6 @@
"MSRValue": "0x1E003C0002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -833,7 +815,6 @@
"MSRValue": "0x3FC03C0400",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -848,7 +829,6 @@
"MSRValue": "0x2003C0400",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -863,7 +843,6 @@
"MSRValue": "0x1003C0400",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -878,7 +857,6 @@
"MSRValue": "0x3FC03C0010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -893,7 +871,6 @@
"MSRValue": "0x10003C0010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -908,7 +885,6 @@
"MSRValue": "0x4003C0010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -923,7 +899,6 @@
"MSRValue": "0x2003C0010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -938,7 +913,6 @@
"MSRValue": "0x1003C0010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -953,7 +927,6 @@
"MSRValue": "0x1E003C0010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -968,7 +941,6 @@
"MSRValue": "0x3FC03C0020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -983,7 +955,6 @@
"MSRValue": "0x10003C0020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -998,7 +969,6 @@
"MSRValue": "0x4003C0020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1013,7 +983,6 @@
"MSRValue": "0x2003C0020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1028,7 +997,6 @@
"MSRValue": "0x1003C0020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1043,7 +1011,6 @@
"MSRValue": "0x1E003C0020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1058,7 +1025,6 @@
"MSRValue": "0x3FC03C2380",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1073,7 +1039,6 @@
"MSRValue": "0x4003C8000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1088,7 +1053,6 @@
"MSRValue": "0x2003C8000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1103,7 +1067,6 @@
"MSRValue": "0x1003C8000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1118,7 +1081,6 @@
"MSRValue": "0x1E003C8000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1133,7 +1095,6 @@
"MSRValue": "0x3FC03C0800",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -1308,4 +1269,4 @@
"Speculative": "1",
"UMask": "0x4"
}
-] \ No newline at end of file
+]
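
The PublicDescription strings removed throughout this file all repeated the same boilerplate: offcore-response (OCR) events are programmed through a dedicated MSR pair (listed per event as MSRIndex, e.g. 0x1a6/0x1a7 in the icelakex files below), and the MSRValue selects the request and response attributes of the transaction. The exact bit layout is model-specific; as a rough illustration only, the MSRValue constants in this file differ in their high response/supplier bits while sharing their low request-type bits. The 16-bit split in the sketch below is an assumption made for side-by-side comparison, not a documented format:

# Illustrative only: the OCR bit layout is model-specific, so the 16-bit
# request/response split below is an assumption for comparison purposes,
# not a documented format.
def split_ocr(msr_value: str, request_bits: int = 16):
    value = int(msr_value, 16)
    request_mask = (1 << request_bits) - 1
    return hex(value & request_mask), hex(value >> request_bits)

# Two entries from this file that share the low request bits (0x4) and
# differ only in the high response/supplier bits:
print(split_ocr("0x3FC03C0004"))  # ('0x4', '0x3fc03c')
print(split_ocr("0x10003C0004"))  # ('0x4', '0x10003c')
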
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
index 4af23c04dc18..622c392f59be 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -18,18 +18,6 @@
"MetricName": "IPC"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Ret;Retire",
- "MetricName": "UPI"
- },
- {
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fed;FetchBW",
- "MetricName": "UpTB"
- },
- {
"BriefDescription": "Cycles Per Instruction (per Logical Processor)",
"MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
"MetricGroup": "Pipeline;Mem",
@@ -409,12 +397,6 @@
"MetricName": "IpFarBranch"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
- },
- {
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
@@ -449,5 +431,23 @@
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c8\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c9\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c10\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency"
}
]
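
The added C8/C9/C10 entries follow the same pattern as the existing package C-state metrics: the cstate_pkg residency counter divided by the TSC over the same window, scaled to a percentage. A minimal sketch of the arithmetic, with made-up counter deltas:

# A minimal sketch of the arithmetic behind a MetricExpr such as
#   (cstate_pkg@c8\-residency@ / msr@tsc@) * 100
# i.e. the C-state residency counter as a percentage of TSC ticks over the
# same interval. The counter deltas below are made up for illustration.
def residency_percent(cstate_ticks: int, tsc_ticks: int) -> float:
    return cstate_ticks / tsc_ticks * 100.0

print(f"C8_Pkg_Residency = {residency_percent(7_500_000, 10_000_000):.1f}%")  # 75.0%
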
diff --git a/tools/perf/pmu-events/arch/x86/icelake/memory.json b/tools/perf/pmu-events/arch/x86/icelake/memory.json
index f045e1f6a868..a6f43cbc2d0a 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/memory.json
@@ -239,7 +239,6 @@
"MSRValue": "0x3FFFC00004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -254,7 +253,6 @@
"MSRValue": "0x3FFFC00001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -269,7 +267,6 @@
"MSRValue": "0x3FFFC00002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -284,7 +281,6 @@
"MSRValue": "0x3FFFC00400",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -299,7 +295,6 @@
"MSRValue": "0x3FFFC00010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -314,7 +309,6 @@
"MSRValue": "0x3FFFC00020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -329,7 +323,6 @@
"MSRValue": "0x3FFFC08000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -344,7 +337,6 @@
"MSRValue": "0x3FFFC00800",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -570,4 +562,4 @@
"Speculative": "1",
"UMask": "0x40"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/other.json b/tools/perf/pmu-events/arch/x86/icelake/other.json
index 08f6321025e8..3055710595c4 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/other.json
@@ -1,17 +1,5 @@
[
{
- "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xc1",
- "EventName": "ASSISTS.ANY",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
- "SampleAfterValue": "100003",
- "Speculative": "1",
- "UMask": "0x7"
- },
- {
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -57,7 +45,6 @@
"MSRValue": "0x10004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -72,7 +59,6 @@
"MSRValue": "0x184000004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -87,7 +73,6 @@
"MSRValue": "0x184000004",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -102,7 +87,6 @@
"MSRValue": "0x10001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -117,7 +101,6 @@
"MSRValue": "0x184000001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -132,7 +115,6 @@
"MSRValue": "0x184000001",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -147,7 +129,6 @@
"MSRValue": "0x10002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -162,7 +143,6 @@
"MSRValue": "0x184000002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -177,7 +157,6 @@
"MSRValue": "0x184000002",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -192,7 +171,6 @@
"MSRValue": "0x10400",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -207,7 +185,6 @@
"MSRValue": "0x184000400",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -222,7 +199,6 @@
"MSRValue": "0x184000400",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -237,7 +213,6 @@
"MSRValue": "0x10010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -252,7 +227,6 @@
"MSRValue": "0x184000010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -267,7 +241,6 @@
"MSRValue": "0x184000010",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -282,7 +255,6 @@
"MSRValue": "0x10020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -297,7 +269,6 @@
"MSRValue": "0x184000020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -312,7 +283,6 @@
"MSRValue": "0x184000020",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -327,7 +297,6 @@
"MSRValue": "0x18000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -342,7 +311,6 @@
"MSRValue": "0x184008000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -357,7 +325,6 @@
"MSRValue": "0x184008000",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -372,7 +339,6 @@
"MSRValue": "0x10800",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -387,7 +353,6 @@
"MSRValue": "0x184000800",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
@@ -402,9 +367,8 @@
"MSRValue": "0x184000800",
"Offcore": "1",
"PEBScounters": "0,1,2,3",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"Speculative": "1",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
index 573ac7ac8879..a017a4727050 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
@@ -13,6 +13,18 @@
"UMask": "0x9"
},
{
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
"BriefDescription": "All branch instructions retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
@@ -441,6 +453,18 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "Speculative": "1",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 0",
@@ -1102,4 +1126,4 @@
"SampleAfterValue": "1000003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
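
With ASSISTS.ANY relocated from other.json and INST_DECODED.DECODERS newly added, both events are resolvable by name through the pmu-events tables. A hypothetical usage sketch (perf matches these names case-insensitively; this assumes an Icelake CPU and sufficient perf_event permissions):

import subprocess

# Hypothetical usage, assuming an Icelake client CPU and sufficient
# perf_event permissions: perf resolves these names from the pmu-events
# tables once the events are present in pipeline.json.
subprocess.run(
    ["perf", "stat", "-e", "assists.any,inst_decoded.decoders", "--", "sleep", "1"],
    check=True,
)
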
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/cache.json b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
index 3c4da0371df9..95fcbec188f8 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
@@ -665,7 +665,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -677,7 +676,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -689,7 +687,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -701,7 +698,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -713,7 +709,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -725,7 +720,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -737,7 +731,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -749,7 +742,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -761,7 +753,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -773,7 +764,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -785,7 +775,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -797,7 +786,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -809,7 +797,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -821,7 +808,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -833,7 +819,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -845,7 +830,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -857,7 +841,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -869,7 +852,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80082380",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -881,7 +863,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C27F0",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -893,7 +874,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F003C0477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -905,7 +885,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -917,7 +896,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -929,7 +907,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -941,7 +918,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1830000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -953,7 +929,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1030000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -965,7 +940,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x830000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -977,7 +951,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1008000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -989,7 +962,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x808000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1001,7 +973,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080800",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1200,4 +1171,4 @@
"Speculative": "1",
"UMask": "0x4"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
index a737fa40feb0..be70672bfdb0 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -475,10 +475,10 @@
"MetricName": "IpFarBranch"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "(cstate_core@c1\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "MetricName": "C1_Core_Residency"
},
{
"BriefDescription": "C6 residency percent per core",
@@ -487,33 +487,15 @@
"MetricName": "C6_Core_Residency"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
- },
- {
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
- },
- {
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency"
- },
- {
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
}
]
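
This hunk swaps the bogus C3 core metric for C1 and drops the C3/C7 states that Icelake server does not support, so every remaining MetricExpr should reference a residency counter the cstate PMUs actually expose. A sketch of that cross-check follows (it assumes the kernel's cstate PMU sysfs layout and a kernel-tree working directory):

import json
import os
import re

# Sketch of a cross-check, assuming the kernel's cstate PMU sysfs layout and
# a kernel-tree working directory: every metric that references a C-state
# residency counter should name a state the PMU actually exposes.
def supported_states(pmu: str) -> set:
    events_dir = f"/sys/bus/event_source/devices/{pmu}/events"
    # Event files are named like "c1-residency", "c6-residency", ...
    return {name.split("-")[0] for name in os.listdir(events_dir)}

with open("tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json") as f:
    metrics = json.load(f)

for metric in metrics:
    match = re.search(r"cstate_(core|pkg)@(c\d+)", metric.get("MetricExpr", ""))
    if match:
        pmu, state = f"cstate_{match.group(1)}", match.group(2)
        assert state in supported_states(pmu), (
            f"{metric['MetricName']} references unsupported {state} on {pmu}")
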
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
index c10a1bbc66b1..58b03a8a1b95 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
@@ -159,7 +159,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -171,7 +170,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -183,7 +181,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -195,7 +192,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -207,7 +203,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -219,7 +214,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F04400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -231,7 +225,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -243,7 +236,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84400400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -255,7 +247,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x94002380",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -267,7 +258,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84002380",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -279,7 +269,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -291,7 +280,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBFC08000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -303,7 +291,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F84408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -315,7 +302,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F844027F0",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -327,7 +313,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FC00477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -339,7 +324,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F04400477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -351,7 +335,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x70CC00477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -363,7 +346,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x94000800",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -375,7 +357,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000800",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -565,4 +546,4 @@
"Speculative": "1",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
index 1246b22769da..c9bf6808ead7 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -1,17 +1,5 @@
[
{
- "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xc1",
- "EventName": "ASSISTS.ANY",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
- "SampleAfterValue": "100003",
- "Speculative": "1",
- "UMask": "0x7"
- },
- {
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -139,7 +127,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -151,7 +138,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x73C000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -163,7 +149,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -175,7 +160,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x708000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -187,7 +171,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -199,7 +182,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x73C000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -211,7 +193,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -223,7 +204,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -235,7 +215,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x703C00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -247,7 +226,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x730000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -259,7 +237,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x703000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -271,19 +248,17 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x708000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads that (IC) were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "BriefDescription": "Counts demand data reads that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SNC_PMM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x700800001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -295,7 +270,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FFC0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -307,7 +281,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x73C000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -319,7 +292,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -331,7 +303,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -343,7 +314,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x703C00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -355,7 +325,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x703000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -367,19 +336,17 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x708000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that (IC) were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_RFO.SNC_PMM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x700800002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -391,7 +358,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x73C000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -403,7 +369,17 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -415,7 +391,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x12380",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -427,7 +402,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90002380",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -439,7 +413,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x90000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -451,7 +424,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x18000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -463,7 +435,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F3FFC0477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -475,7 +446,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x73C000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -487,7 +457,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -499,7 +468,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -511,7 +479,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x70C000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -523,7 +490,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x700C00477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -535,7 +501,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F33000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -547,7 +512,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x730000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -559,7 +523,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x703000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -571,19 +534,17 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x708000477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that (IC) were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.READS_TO_CORE.SNC_PMM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x700800477",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -595,8 +556,7 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10800",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
index 068a3d46b443..95c1008ef057 100644
--- a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -13,6 +13,18 @@
"UMask": "0x9"
},
{
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "Speculative": "1",
+ "UMask": "0x7"
+ },
+ {
"BriefDescription": "All branch instructions retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
@@ -1076,4 +1088,4 @@
"SampleAfterValue": "1000003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 2de31c56c2a5..d89d3f8db190 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -676,7 +676,7 @@
"UMask": "0x3"
},
{
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
@@ -1269,4 +1269,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
index df4b43294fa0..e8917cb59566 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
@@ -5,8 +5,7 @@
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
- "UMask": "0x1",
- "Umask": "0x3",
+ "UMask": "0x3",
"Unit": "iMC"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 963a76fec277..f5a382421a60 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -44,6 +44,7 @@ GenuineIntel-6-86,v1,tremontx,core
GenuineIntel-6-96,v1,elkhartlake,core
GenuineIntel-6-97,v1,alderlake,core
GenuineIntel-6-9A,v1,alderlake,core
+GenuineIntel-6-8F,v1,sapphirerapids,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen3,core
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/other.json b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
index 710b106ce12a..f6887b234b0e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
@@ -1,29 +1,5 @@
[
{
- "BriefDescription": "Early Branch Prediciton Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.EARLY",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Late Branch Prediction Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.LATE",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Branch prediction unit missed call or return",
- "Counter": "0,1,2,3",
- "EventCode": "0xE5",
- "EventName": "BPU_MISSED_CALL_RET",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "ES segment renames",
"Counter": "0,1,2,3",
"EventCode": "0xD5",
@@ -120,46 +96,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "All RAT stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ANY",
- "SampleAfterValue": "2000000",
- "UMask": "0xf"
- },
- {
- "BriefDescription": "Flag stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.FLAGS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Partial register stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.REGISTERS",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "ROB read port stalls cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ROB_READ_PORT",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Scoreboard stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.SCOREBOARD",
- "SampleAfterValue": "2000000",
- "UMask": "0x8"
- },
- {
"BriefDescription": "All Store buffer stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -207,4 +143,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
index e64d685c128a..6fc1a6efd8e8 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
@@ -51,6 +51,30 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE5",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Branch instructions decoded",
"Counter": "0,1,2,3",
"EventCode": "0xE0",
@@ -477,6 +501,46 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Resource related stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0xA2",
@@ -878,4 +942,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
new file mode 100644
index 000000000000..6fa723c9a6f6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
@@ -0,0 +1,1083 @@
+[
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event L1D_PEND_MISS.L2_STALLS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALLS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.ALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "L2_LINES_OUT.NON_SILENT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All L2 requests.[This event is alias to L2_RQSTS.REFERENCES]",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all L2 requests.[This event is alias to L2_RQSTS.REFERENCES]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache.[This event is alias to L2_RQSTS.MISS]",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses.[This event is alias to L2_RQSTS.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache.[This event is alias to L2_REQUEST.MISS]",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses.[This event is alias to L2_REQUEST.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "All L2 requests.[This event is alias to L2_REQUEST.ALL]",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all L2 requests.[This event is alias to L2_REQUEST.ALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "LONGEST_LAT_CACHE.MISS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "All retired load instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "All retired store instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "L1_Hit_Indication": "1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions with remote Intel Optane DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with remote Intel Optane DC persistent memory as the data source and the data request missed L3.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Retired load instructions with local Intel Optane DC persistent memory as the data source where the data request missed all caches.",
+ "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts retired load instructions with local Intel Optane DC persistent memory as the data source and the data request missed L3.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.L2_HIT",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired memory uops for any access",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe5",
+ "EventName": "MEM_UOP_RETIRED.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80082380",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F003C4477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C4477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C4477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C4477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1830004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ }
+]
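The OFFCORE_REQUESTS.DEMAND_DATA_RD description above points at the standard recipe: divide the occupancy event (OFFCORE_REQUESTS_OUTSTANDING) by the request count to estimate average off-core read latency in core cycles. Below is a minimal C sketch of that recipe using the DATA_RD pair defined in this file (EventCode 0x20/UMask 0x8 and 0x21/0x8), assuming the standard x86 raw encoding config = (UMask << 8) | EventCode; the busy loop and error handling are illustrative only, not part of the perf tooling.

/*
 * Sketch: estimate average off-core data-read latency from the two
 * events above.  config = (UMask << 8) | EventCode is the standard
 * x86 raw PMU encoding; the event numbers come from the JSON entries.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_raw(uint64_t config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = config;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* perf_event_open() has no glibc wrapper. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	int occ_fd = open_raw(0x0820);	/* OFFCORE_REQUESTS_OUTSTANDING.DATA_RD */
	int req_fd = open_raw(0x0821);	/* OFFCORE_REQUESTS.DATA_RD */
	static volatile char buf[64 << 20];	/* working set larger than L3 */
	uint64_t occ = 0, req = 0;
	long i, sum = 0;

	if (occ_fd < 0 || req_fd < 0)
		return 1;
	ioctl(occ_fd, PERF_EVENT_IOC_ENABLE, 0);
	ioctl(req_fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < (long)sizeof(buf); i += 64)	/* one read per cache line */
		sum += buf[i];
	ioctl(occ_fd, PERF_EVENT_IOC_DISABLE, 0);
	ioctl(req_fd, PERF_EVENT_IOC_DISABLE, 0);
	(void)sum;
	if (read(occ_fd, &occ, sizeof(occ)) != sizeof(occ) ||
	    read(req_fd, &req, sizeof(req)) != sizeof(req))
		return 1;
	printf("avg data-read latency: %.1f core cycles (%lu requests)\n",
	       req ? (double)occ / req : 0.0, (unsigned long)req);
	return 0;
}

A real measurement would pin the thread and warm the TLB first; the occupancy/requests ratio is only meaningful when the working set actually misses the core caches.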
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
new file mode 100644
index 000000000000..53d35dddd313
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
@@ -0,0 +1,218 @@
+[
+ {
+ "BriefDescription": "ARITH.FPDIV_ACTIVE",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.SSE_AVX_MIX",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_0",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_5",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of all Scalar Half-Precision FP arithmetic instructions(1) retired - regular and complex.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of all Vector (also called packed) Half-Precision FP arithmetic instructions(1) retired.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1c"
+ }
+]
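Taken together, the FP_ARITH_INST_RETIRED.* descriptions above pin down a fixed operations-per-count weight for each sub-event (1 for scalar, 2/4/8 for packed double at 128/256/512 bits, 4/8/16 for packed single), with FMA-class instructions already double-counted by the hardware. A minimal C sketch of the weighted sum those weights imply; the struct and function names are illustrative only, not any existing perf API.

#include <stdint.h>

/* Raw counts of the eight FP_ARITH_INST_RETIRED.* sub-events above. */
struct fp_arith_counts {
	uint64_t scalar_single, scalar_double;
	uint64_t pk128_single, pk128_double;
	uint64_t pk256_single, pk256_double;
	uint64_t pk512_single, pk512_double;
};

/*
 * Total retired FP operations, using the per-count weights from the
 * event descriptions (elements per vector; FMAs need no extra factor
 * because the PMU already counts them twice).
 */
static uint64_t total_flop(const struct fp_arith_counts *c)
{
	return  1 * c->scalar_single +  1 * c->scalar_double +
		4 * c->pk128_single  +  2 * c->pk128_double  +
		8 * c->pk256_single  +  4 * c->pk256_double  +
	       16 * c->pk512_single  +  8 * c->pk512_double;
}

Dividing total_flop() by elapsed seconds gives the usual GFLOPS estimate; note the caveat repeated in the descriptions that DAZ and FTZ must be set in MXCSR for the counts to be reliable.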
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
new file mode 100644
index 000000000000..04ba0269c73c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
@@ -0,0 +1,471 @@
+[
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x61",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600106",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x608006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x601006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x610006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x602006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600406",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x620006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x604006",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600806",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x17",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
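All FRONTEND_RETIRED.* entries above program MSR 0x3F7 (MSR_PEBS_FRONTEND), and their MSRValues follow a visible pattern: a sub-event selector in the low byte (0x11 DSB_MISS, 0x14 ITLB_MISS, 0x06 for the latency events, and so on), the IDQ bubble-length threshold in bits 19:8, and a bubble-width in bits 22:20 (6 for the plain LATENCY_GE_* events, 1 for LATENCY_GE_2_BUBBLES_GE_1). The following C sketch encodes that assumed layout, inferred from the values in this file rather than from a documented API, and reproduces every MSRValue above.

#include <stdint.h>

/*
 * Assumed MSR_PEBS_FRONTEND (0x3F7) layout, inferred from the
 * MSRValues in this file: bits 7:0 sub-event selector, bits 19:8
 * IDQ bubble-length threshold, bits 22:20 IDQ bubble width.
 */
static uint64_t pebs_frontend(uint8_t event, uint16_t bubble_len,
			      uint8_t bubble_width)
{
	return (uint64_t)event |
	       ((uint64_t)(bubble_len & 0xfff) << 8) |
	       ((uint64_t)(bubble_width & 0x7) << 20);
}

/*
 * Spot checks against the entries above:
 *   pebs_frontend(0x06, 8, 6) == 0x600806 -> LATENCY_GE_8
 *   pebs_frontend(0x06, 2, 1) == 0x100206 -> LATENCY_GE_2_BUBBLES_GE_1
 *   pebs_frontend(0x11, 0, 0) == 0x11     -> DSB_MISS
 */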
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
new file mode 100644
index 000000000000..7436ced3e04e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
@@ -0,0 +1,415 @@
+[
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "2",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "3",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "9",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "CollectPEBSRecord": "2",
+ "Counter": "1,2,3,4,5,6,7",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "TakenAlone": "1",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 store uop. This PEBS event is the trigger for stores sampled by the PEBS Store Facility.",
+ "CollectPEBSRecord": "2",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC00002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94002380",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84002380",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC04477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70CC04477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94000800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
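
The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above all share EventCode 0xcd and UMask 0x1; only the threshold written to MSR 0x3F6 (the MSRValue field) changes, and SampleAfterValue shrinks as the threshold grows so that the rarer, slower loads are still sampled at a useful rate. The minimal user-space sketch below shows how such a definition could be consumed through perf_event_open(). It is an illustration under assumptions, not code from this patch: the config packing follows the usual x86 (umask << 8) | event layout, the latency threshold is assumed to travel in config1 (perf's "ldlat"), and the threshold/period values are taken from the GT_64 entry.

/* Hypothetical standalone sketch; not part of this patch. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = (0x1ULL << 8) | 0xcd; /* UMask 0x1, EventCode 0xcd */
        attr.config1 = 64;                  /* assumed ldlat threshold -> MSR 0x3F6 (GT_64) */
        attr.sample_period = 2003;          /* SampleAfterValue of the GT_64 entry */
        attr.precise_ip = 2;                /* request PEBS, cf. "PEBS": "2" */
        attr.exclude_kernel = 1;

        int fd = perf_event_open(&attr, 0, -1, -1, 0); /* this thread, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        /* mmap() a ring buffer here to consume the latency samples. */
        close(fd);
        return 0;
}

On the command line the equivalent selection would usually be written as cpu/event=0xcd,umask=0x1,ldlat=64/pp, assuming the standard "ldlat" format attribute exposed by Intel core PMUs.
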
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
new file mode 100644
index 000000000000..7d6f8e25bb10
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
@@ -0,0 +1,362 @@
+[
+ {
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.PAGE_FAULT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb7",
+ "EventName": "EXE.AMX_BUSY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000004",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000001",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000002",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x12380",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90002380",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC4477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C04477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x733004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708004477",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "Offcore": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "XQ.FULL_CYCLES",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL_CYCLES",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
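
The OCR.* entries in this file are off-core response events: the EventCode/UMask pair selects the OCR facility (two encodings, 0x2A and 0x2B, matching the two match registers at MSRIndex 0x1a6 and 0x1a7), and the 64-bit MSRValue is the request/response filter written into that register. The counting sketch below is likewise a hedged illustration, not part of the patch; it assumes the common convention that the match value rides in config1 of a raw event, and reuses the MSRValue of OCR.DEMAND_DATA_RD.ANY_RESPONSE from above.

/* Hypothetical standalone sketch; not part of this patch. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count = 0;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = (0x1ULL << 8) | 0x2a; /* UMask 0x1, first EventCode of "0x2A,0x2B" */
        attr.config1 = 0x10001;             /* MSRValue: OCR.DEMAND_DATA_RD.ANY_RESPONSE */
        attr.disabled = 1;                  /* start stopped, enable around the workload */
        attr.exclude_kernel = 1;

        int fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the workload being measured ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                printf("OCR.DEMAND_DATA_RD.ANY_RESPONSE: %" PRIu64 "\n", count);
        close(fd);
        return 0;
}

In perf's event syntax the same filter is typically spelled cpu/event=0x2a,umask=0x1,offcore_rsp=0x10001/, "offcore_rsp" being the sysfs format name Intel PMUs are assumed to expose for config1 here.
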
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
new file mode 100644
index 000000000000..b0920f5b25ed
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
@@ -0,0 +1,1283 @@
+[
+ {
+ "BriefDescription": "AMX_OPS_RETIRED.BF16",
+ "EventCode": "0xce",
+ "EventName": "AMX_OPS_RETIRED.BF16",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "AMX_OPS_RETIRED.INT8",
+ "EventCode": "0xce",
+ "EventName": "AMX_OPS_RETIRED.INT8",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.FPDIV_ACTIVE",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FP_DIVIDER_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.INT_DIVIDER_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x70"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CollectPEBSRecord": "2",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PEBScounters": "34",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PEBScounters": "33",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "8",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "12",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x75",
+ "EventName": "INST_DECODED.DECODERS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "Fixed counter 0",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.MACRO_FUSED",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of all retired NOP instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "CollectPEBSRecord": "2",
+ "Counter": "Fixed counter 0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PEBScounters": "32",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "INST_RETIRED.REP_ITERATION",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.REP_ITERATION",
+ "PEBScounters": "1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_MISC.MBA_STALLS",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.MBA_STALLS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x7",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "TakenAlone": "1",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.128BIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.256BIT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xac"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_128",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_256",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.MUL_256",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.SHUFFLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_128",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_256",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x88"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "6",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "MISC2_RETIRED.LFENCE",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xe0",
+ "EventName": "MISC2_RETIRED.LFENCE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "CollectPEBSRecord": "2",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BAD_SPEC_SLOTS",
+ "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "CollectPEBSRecord": "2",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of specualtive operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "Fixed counter 3",
+ "EventName": "TOPDOWN.SLOTS",
+ "PEBScounters": "35",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_DECODED.DEC0_UOPS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x76",
+ "EventName": "UOPS_DECODED.DEC0_UOPS",
+ "PEBScounters": "0,1,2,3",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 0",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of uops dispatch to execution port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 1",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of uops dispatch to execution port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3_10",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 4 and 9",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 5 and 11",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_5_11",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops executed on port 6",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of uops dispatch to execution port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 7 and 8",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of uops executed on the core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALLS",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_EXECUTED.STALLS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with retired uop(s).",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.CYCLES",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts cycles where at least one uop has retired.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.HEAVY",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.HEAVY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.MS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "2000003",
+ "TakenAlone": "1",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_RETIRED.STALLS",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
new file mode 100644
index 000000000000..8f9497838bd4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -0,0 +1,530 @@
+[
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * (( BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) ) / TOPDOWN.SLOTS)",
+ "MetricGroup": "Ret",
+ "MetricName": "Branching_Overhead"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD)",
+ "MetricGroup": "Pipeline;Mem",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "TOPDOWN.SLOTS / ( TOPDOWN.SLOTS / 2 ) if #SMT_on else 1",
+ "MetricGroup": "SMT;TmaL1",
+ "MetricName": "Slots_Utilization"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "Execute_per_Issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Ret;SMT;TmaL1",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 ) / CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "Ret;Flops",
+ "MetricName": "FLOPc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "( FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5 ) / ( 2 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "FP_Arith_Utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "IpLoad"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "IpStore"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "IpBranch"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "IpCall"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO",
+ "MetricName": "IpTB"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch. ",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "BpTkBranch"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 )",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "IpFLOP"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR) + (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.VECTOR) )",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "IpArith",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_SP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "IpArith_Scalar_DP",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX128",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX256",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / ( FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF )",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "IpArith_AVX512",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.BF16",
+ "MetricGroup": "Flops;FpVector;InsType;Server",
+ "MetricName": "IpArith_AMX_F16",
+ "PublicDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+ },
+ {
+ "BriefDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.INT8",
+ "MetricGroup": "IntVector;InsType;Server",
+ "MetricName": "IpArith_AMX_Int8",
+ "PublicDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "IpSWPF"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions, Sample with: INST_RETIRED.PREC_DIST",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "Strings_Cycles"
+ },
+ {
+ "BriefDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@ASSISTS.ANY\\,umask\\=0x1B@",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "IpAssist"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "Execute"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "Fetch_UpC"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "DSB_Switch_Cost"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "IpDSB_Miss_Ret"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_NT"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "Cond_TK"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "CallRet"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Jump"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - ( (BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES) + (( BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN ) / BR_INST_RETIRED.ALL_BRANCHES) + ((BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES) )",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "Other_Branches"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBound;MemoryBW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L1MPKI_Load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;Backend;CacheMisses",
+ "MetricName": "L2MPKI"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses;Offcore",
+ "MetricName": "L2MPKI_All"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2MPKI_Load"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_All"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1000 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L2HPKI_Load"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "L3MPKI"
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1000 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;CacheMisses",
+ "MetricName": "FB_HPKI"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricConstraint": "NO_NMI_WATCHDOG",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING ) / ( 4 * CPU_CLK_UNHALTED.DISTRIBUTED )",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW"
+ },
+ {
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "L2_Evictions_Silent_PKI"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "L2_Evictions_NonSilent_PKI"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "(64 * L1D.REPLACEMENT / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L1D_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "(64 * L2_LINES_IN.ALL / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L2_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "L3_Cache_Fill_BW_1T"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "(64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time)",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "L3_Cache_Access_BW_1T"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 / duration_time",
+ "MetricGroup": "Summary;Power",
+ "MetricName": "Average_Frequency"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "( ( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + FP_ARITH_INST_RETIRED2.SCALAR_HALF ) + 2 * ( FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF ) + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * ( FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE ) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16 ) / 1000000000 ) / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "GFLOPs",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Tera Integer (matrix) Operations Per Second",
+ "MetricExpr": "( 8 * AMX_OPS_RETIRED.INT8 / 1000000000000 ) / duration_time",
+ "MetricGroup": "Cor;HPC;IntVector;Server",
+ "MetricName": "TIOPS"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0",
+ "MetricGroup": "SMT",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "Kernel_CPI"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC",
+ "MetricName": "DRAM_BW_Use"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD ) / ( uncore_cha_0@event\\=0x1@ / duration_time )",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "MEM_Read_Latency"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / cha@UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD\\,thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "MEM_Parallel_Reads"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricExpr": "( 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM ) / uncore_cha_0@event\\=0x1@ )",
+ "MetricGroup": "Mem;MemoryLat;SoC;Server",
+ "MetricName": "MEM_PMM_Read_Latency"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
+ "MetricExpr": " 1000000000 * ( UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR ) / uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "Mem;MemoryLat;SoC;Server",
+ "MetricName": "MEM_DRAM_Read_Latency"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "( ( 64 * UNC_M_PMM_RPQ_INSERTS / 1000000000 ) / duration_time )",
+ "MetricGroup": "Mem;MemoryBW;SoC;Server",
+ "MetricName": "PMM_Read_BW"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "( ( 64 * UNC_M_PMM_WPQ_INSERTS / 1000000000 ) / duration_time )",
+ "MetricGroup": "Mem;MemoryBW;SoC;Server",
+ "MetricName": "PMM_Write_BW"
+ },
+ {
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1000000000 / duration_time",
+ "MetricGroup": "IoBW;Mem;SoC;Server",
+ "MetricName": "IO_Write_BW"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "SoC",
+ "MetricName": "Socket_CLKS"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "IpFarBranch"
+ },
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "(cstate_core@c1\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
new file mode 100644
index 000000000000..41d7cd4958a1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
@@ -0,0 +1,499 @@
+[
+ {
+ "BriefDescription": "IMC Clockticks at DCLK frequency",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at HCLK frequency",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x00000000c1",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM underfill read CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x00000000c4",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x00000000cf",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM write CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x00000000f0",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue inserts",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x00000000ff",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to read on page miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x0000000011",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to write on page miss",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x0000000022",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to (?)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "UMask": "0x0000000088",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to read, write, underfill, or PGT",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x00000000ff",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x00000000ff",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0x00000000c2",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x00000000c8",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "UMask": "0x00000000e0",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Prechages from Page Table",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT_PCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000010",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000020",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000040",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT_PCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL",
+ "PerPkg": "1",
+ "UMask": "0x0000000044",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "UMask": "0x00000000D0",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000040",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000010",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000020",
+ "UMaskExt": "0x00000000",
+ "Unit": "iMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json
new file mode 100644
index 000000000000..9b8664c50213
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-other.json
@@ -0,0 +1,5150 @@
+[
+ {
+ "BriefDescription": "UPI Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x000000000f",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "Counter": "FIXED",
+ "CounterType": "FIXED",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IRP Clockticks",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "M2P Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "IIO Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part0 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part1 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part2 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part3 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "M2M Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M3UPI Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Read requests from a unit on this socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a remote socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write Requests from a unit on this socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Writes Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x0000000030",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for CRd misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0x00c80ffe01",
+ "UMaskExt": "0x00c80ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x00c817fe01",
+ "UMaskExt": "0x00c817fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c897fe01",
+ "UMaskExt": "0x00c897fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for ItoM from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc43ff04",
+ "UMaskExt": "0x00cc43ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA targeting local memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c816fe01",
+ "UMaskExt": "0x00c816fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA targeting remote memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c8177e01",
+ "UMaskExt": "0x00c8177e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting local memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00C896FE01",
+ "UMaskExt": "0x00C896FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting remote memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00C8977E01",
+ "UMaskExt": "0x00C8977E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x00c817fe01",
+ "UMaskExt": "0x00c817fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA targeting local memory",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c816fe01",
+ "UMaskExt": "0x00c816fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA targeting remote memory",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c8177e01",
+ "UMaskExt": "0x00c8177e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8178a01",
+ "UMaskExt": "0x00c8178a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRds issued by IA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8178601",
+ "UMaskExt": "0x00c81786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8178601",
+ "UMaskExt": "0x00c81786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8178a01",
+ "UMaskExt": "0x00c8178a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for RdCur from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x00c8f3ff04",
+ "UMaskExt": "0x00c8f3ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for ItoMCacheNears from IO devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0x00cd43ff04",
+ "UMaskExt": "0x00cd43ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x0000000010",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x0000000020",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCTRL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x0000000040",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Protocol Header",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0000000097",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Idle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x0000000047",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "All Null Flits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x0000000027",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x0000000010",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x0000000020",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCTRL",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x0000000040",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Protocol Header",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "UMask": "0x000000000f",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "UMask": "0x0000000097",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Idle",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "UMask": "0x0000000047",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x0000000027",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x000000000e",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x000000010e",
+ "UMaskExt": "0x00000001",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0x000000000f",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "UMask": "0x000000010f",
+ "UMaskExt": "0x00000001",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0x000000000e",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "UMask": "0x000000010e",
+ "UMaskExt": "0x00000001",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0x000000000f",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "UMask": "0x000000010f",
+ "UMaskExt": "0x00000001",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2C",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF - request insert from TC",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF occupancy",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part0 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part1 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part2 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part3 to Memory",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "Counter": "2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x00000000ff",
+ "UMaskExt": "0x00000000",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel UPI",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x07",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x0320",
+ "UMaskExt": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x0340",
+ "UMaskExt": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x0301",
+ "UMaskExt": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x0304",
+ "UMaskExt": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x0302",
+ "UMaskExt": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "UMask": "0x0310",
+ "UMaskExt": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "UMask": "0x0308",
+ "UMaskExt": "0x03",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000104",
+ "UMaskExt": "0x00000001",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000204",
+ "UMaskExt": "0x00000002",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x000000000a",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x0000000005",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x0000000005",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": UPI - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x000000000a",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": XPT - All Channels",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x0000000005",
+ "UMaskExt": "0x00000000",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FlowQ Generated Prefetch",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2a",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x2b",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M3UPI CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x0000000010",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests made into the CHA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x0000000003",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write requests made into the CHA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x000000000c",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "PerPkg": "1",
+ "UMask": "0x00001bc1ff",
+ "UMaskExt": "0x00001bc1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "PerPkg": "1",
+ "UMask": "0x00001c19ff",
+ "UMaskExt": "0x00001c19",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All Snoops from Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.SNPS_FROM_REM",
+ "PerPkg": "1",
+ "UMask": "0x00c001ff08",
+ "UMaskExt": "0x00c001ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x00C001FFff",
+ "UMaskExt": "0x00C001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "UMask": "0x00c001ff01",
+ "UMaskExt": "0x00c001ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0x00c001fd01",
+ "UMaskExt": "0x00c001fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "UMask": "0x00c80ffd01",
+ "UMaskExt": "0x00c80ffd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "PerPkg": "1",
+ "UMask": "0x00c817fd01",
+ "UMaskExt": "0x00c817fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0x00ccc7fd01",
+ "UMaskExt": "0x00ccc7fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c807fd01",
+ "UMaskExt": "0x00c807fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; misses from Local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0x00c001fe01",
+ "UMaskExt": "0x00c001fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0x00ccc7fe01",
+ "UMaskExt": "0x00ccc7fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c807fe01",
+ "UMaskExt": "0x00c807fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "UMask": "0x00c001ff04",
+ "UMaskExt": "0x00c001ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0x00c001fd04",
+ "UMaskExt": "0x00c001fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0x00c001fe04",
+ "UMaskExt": "0x00c001fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM misses from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc43fe04",
+ "UMaskExt": "0x00cc43fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c803fe04",
+ "UMaskExt": "0x00c803fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IPQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "PerPkg": "1",
+ "UMask": "0x0000000010",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x0000000020",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RRQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x0000000040",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBQ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x00C000FF04",
+ "UMaskExt": "0x00C000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x00c000ff01",
+ "UMaskExt": "0x00c000ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x00C000FF05",
+ "UMaskExt": "0x00C000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All Snoops from Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS",
+ "PerPkg": "1",
+ "UMask": "0x00C001FF08",
+ "UMaskExt": "0x00C001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Remote",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "UMask": "0x00C001FFC8",
+ "UMaskExt": "0x00C001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Hits",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Misses",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMCFG Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000020",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMIO Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMIO",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000040",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Local Targets",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000080",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Remote Targets",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000100",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NonCoherent",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "PerPkg": "1",
+ "UMaskExt": "0x01000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just ISOC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x02000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c88ffd01",
+ "UMaskExt": "0x00c88ffd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c897fd01",
+ "UMaskExt": "0x00c897fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0x00c827fd01",
+ "UMaskExt": "0x00c827fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c8a7fd01",
+ "UMaskExt": "0x00c8a7fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO Pref hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c887fd01",
+ "UMaskExt": "0x00c887fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c88ffe01",
+ "UMaskExt": "0x00c88ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0x00c827fe01",
+ "UMaskExt": "0x00c827fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c8a7fe01",
+ "UMaskExt": "0x00c8a7fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO pref misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c887fe01",
+ "UMaskExt": "0x00c887fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM hits from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc43fd04",
+ "UMaskExt": "0x00cc43fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO hits from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c803fd04",
+ "UMaskExt": "0x00c803fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c803ff04",
+ "UMaskExt": "0x00c803ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO pref from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c887ff01",
+ "UMaskExt": "0x00c887ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c807ff01",
+ "UMaskExt": "0x00c807ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0x00ccc7ff01",
+ "UMaskExt": "0x00ccc7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
+ "PerPkg": "1",
+ "UMask": "0x00c817ff01",
+ "UMaskExt": "0x00c817ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c897ff01",
+ "UMaskExt": "0x00c897ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0x00c827ff01",
+ "UMaskExt": "0x00c827ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c8a7ff01",
+ "UMaskExt": "0x00c8a7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00C88FFF01",
+ "UMaskExt": "0x00C88FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0x00c80fff01",
+ "UMaskExt": "0x00c80fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts RFO misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c806fe01",
+ "UMaskExt": "0x00c806fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c8077e01",
+ "UMaskExt": "0x00c8077e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c886fe01",
+ "UMaskExt": "0x00c886fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c8877e01",
+ "UMaskExt": "0x00c8877e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;CLFlush from Local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x00c8c7ff01",
+ "UMaskExt": "0x00c8c7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;CLFlushOpt from Local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "UMask": "0x00c8d7ff01",
+ "UMaskExt": "0x00c8d7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;ItoM from Local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc47ff01",
+ "UMaskExt": "0x00cc47ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;SpecItoM from Local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc57ff01",
+ "UMaskExt": "0x00cc57ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All Snoops from Remote",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.SNPS_FROM_REM",
+ "PerPkg": "1",
+ "UMask": "0x00c001ff08",
+ "UMaskExt": "0x00c001ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x00C001FFff",
+ "UMaskExt": "0x00C001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "UMask": "0x00c001ff01",
+ "UMaskExt": "0x00c001ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "UMask": "0x00c001fd01",
+ "UMaskExt": "0x00c001fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "UMask": "0x00c80ffd01",
+ "UMaskExt": "0x00c80ffd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "PerPkg": "1",
+ "UMask": "0x00c817fd01",
+ "UMaskExt": "0x00c817fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0x00ccc7fd01",
+ "UMaskExt": "0x00ccc7fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c807fd01",
+ "UMaskExt": "0x00c807fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "UMask": "0x00c001fe01",
+ "UMaskExt": "0x00c001fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "UMask": "0x00c80ffe01",
+ "UMaskExt": "0x00c80ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0x00ccc7fe01",
+ "UMaskExt": "0x00ccc7fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c807fe01",
+ "UMaskExt": "0x00c807fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "UMask": "0x00c001ff04",
+ "UMaskExt": "0x00c001ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "UMask": "0x00c001fd04",
+ "UMaskExt": "0x00c001fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "UMask": "0x00c001fe04",
+ "UMaskExt": "0x00c001fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c803fe04",
+ "UMaskExt": "0x00c803fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM misses from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc43fe04",
+ "UMaskExt": "0x00cc43fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IPQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x0000000008",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "PerPkg": "1",
+ "UMask": "0x0000000010",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "UMask": "0x0000000020",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RRQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x0000000040",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WBQ",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x0000000080",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x00C000FF04",
+ "UMaskExt": "0x00C000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x00C000FF01",
+ "UMaskExt": "0x00C000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x00C000FF05",
+ "UMaskExt": "0x00C000FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All Snoops from Remote",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS",
+ "PerPkg": "1",
+ "UMask": "0x00C001FF08",
+ "UMaskExt": "0x00C001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Remote",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL",
+ "PerPkg": "1",
+ "UMask": "0x00C001FFC8",
+ "UMaskExt": "0x00C001FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Hits",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Misses",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMCFG Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000020",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMIO Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMIO",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000040",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Local Targets",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000080",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000100",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000200",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000400",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "PerPkg": "1",
+ "UMaskExt": "0x01000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just ISOC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "PerPkg": "1",
+ "UMaskExt": "0x02000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c88ffd01",
+ "UMaskExt": "0x00c88ffd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c897fd01",
+ "UMaskExt": "0x00c897fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0x00c827fd01",
+ "UMaskExt": "0x00c827fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c8a7fd01",
+ "UMaskExt": "0x00c8a7fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO Pref hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c887fd01",
+ "UMaskExt": "0x00c887fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c88ffe01",
+ "UMaskExt": "0x00c88ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c897fe01",
+ "UMaskExt": "0x00c897fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0x00c827fe01",
+ "UMaskExt": "0x00c827fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c8a7fe01",
+ "UMaskExt": "0x00c8a7fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c887fe01",
+ "UMaskExt": "0x00c887fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM hits from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc43fd04",
+ "UMaskExt": "0x00cc43fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO hits from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c803fd04",
+ "UMaskExt": "0x00c803fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ItoM from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c803ff04",
+ "UMaskExt": "0x00c803ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc43ff04",
+ "UMaskExt": "0x00cc43ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "UMask": "0x00c807ff01",
+ "UMaskExt": "0x00c807ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c887ff01",
+ "UMaskExt": "0x00c887ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "UMask": "0x00ccc7ff01",
+ "UMaskExt": "0x00ccc7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
+ "PerPkg": "1",
+ "UMask": "0x00c817ff01",
+ "UMaskExt": "0x00c817ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "PerPkg": "1",
+ "UMask": "0x00c827ff01",
+ "UMaskExt": "0x00c827ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c8a7ff01",
+ "UMaskExt": "0x00c8a7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "UMask": "0x00c80fff01",
+ "UMaskExt": "0x00c80fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c88fff01",
+ "UMaskExt": "0x00c88fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
+ "PerPkg": "1",
+ "UMask": "0x00c897ff01",
+ "UMaskExt": "0x00c897ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00C896FE01",
+ "UMaskExt": "0x00C896FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00C8977E01",
+ "UMaskExt": "0x00C8977E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c806fe01",
+ "UMaskExt": "0x00c806fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c8077e01",
+ "UMaskExt": "0x00c8077e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c886fe01",
+ "UMaskExt": "0x00c886fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c8877e01",
+ "UMaskExt": "0x00c8877e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in E state that are victimized on a fill",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "UMask": "0x0000000002",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in M state that are victimized on a fill",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "PerPkg": "1",
+ "UMask": "0x0000000001",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in S state that are victimized on a fill",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "UMask": "0x0000000004",
+ "UMaskExt": "0x00000000",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0x00cd43fd04",
+ "UMaskExt": "0x00cd43fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0x00cd43fe04",
+ "UMaskExt": "0x00cd43fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0x00c837fe01",
+ "UMaskExt": "0x00c837fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0x00c837fd01",
+ "UMaskExt": "0x00c837fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "UMask": "0x00c837ff01",
+ "UMaskExt": "0x00c837ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "UMask": "0xcc3fff01",
+ "UMaskExt": "0xcc3fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur and FsRdCur hits from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x00c8f3fd04",
+ "UMaskExt": "0x00c8f3fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur and FsRdCur misses from local IO",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x00c8f3fe04",
+ "UMaskExt": "0x00c8f3fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur hits from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x00c8f3fd04",
+ "UMaskExt": "0x00c8f3fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x00c8f3fe04",
+ "UMaskExt": "0x00c8f3fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur from local IO",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "UMask": "0x00c8f3ff04",
+ "UMaskExt": "0x00c8f3ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0x00cccffd01",
+ "UMaskExt": "0x00cccffd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData hits from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0x00ccd7fd01",
+ "UMaskExt": "0x00ccd7fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0x00ccd7ff01",
+ "UMaskExt": "0x00ccd7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0x00cccffe01",
+ "UMaskExt": "0x00cccffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData misses from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0x00ccd7fe01",
+ "UMaskExt": "0x00ccd7fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0x00cccffd01",
+ "UMaskExt": "0x00cccffd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData hits from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0x00ccd7fd01",
+ "UMaskExt": "0x00ccd7fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0x00ccd7ff01",
+ "UMaskExt": "0x00ccd7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0x00cccffe01",
+ "UMaskExt": "0x00cccffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData misses from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "UMask": "0x00ccd7fe01",
+ "UMaskExt": "0x00ccd7fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode from local IA",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0x00cccfff01",
+ "UMaskExt": "0x00cccfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode from local IA",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "UMask": "0x00cccfff01",
+ "UMaskExt": "0x00cccfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8168a01",
+ "UMaskExt": "0x00c8168a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8170a01",
+ "UMaskExt": "0x00c8170a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8168601",
+ "UMaskExt": "0x00c81686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8170601",
+ "UMaskExt": "0x00c81706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C8978A01",
+ "UMaskExt": "0x00C8978A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C8968A01",
+ "UMaskExt": "0x00C8968A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C8970A01",
+ "UMaskExt": "0x00C8970A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C8978601",
+ "UMaskExt": "0x00C89786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C8968601",
+ "UMaskExt": "0x00C89686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C8970601",
+ "UMaskExt": "0x00C89706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00C80EFE01",
+ "UMaskExt": "0x00C80EFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00C80F7E01",
+ "UMaskExt": "0x00C80F7E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00C88EFE01",
+ "UMaskExt": "0x00C88EFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00C88F7E01",
+ "UMaskExt": "0x00C88F7E",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0x00CD47FF01",
+ "UMaskExt": "0x00CD47FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x00cc27ff01",
+ "UMaskExt": "0x00cc27ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00CC47FD01",
+ "UMaskExt": "0x00CC47FD",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00CC47FE01",
+ "UMaskExt": "0x00CC47FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "UMask": "0x00C877DE01",
+ "UMaskExt": "0x00C877DE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "UMask": "0x00C87FDE01",
+ "UMaskExt": "0x00C87FDE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "UMask": "0x00C867FF01",
+ "UMaskExt": "0x00C867FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "UMask": "0x00C867FE01",
+ "UMaskExt": "0x00C867FE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C8678A01",
+ "UMaskExt": "0x00C8678A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C8668A01",
+ "UMaskExt": "0x00C8668A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C8670A01",
+ "UMaskExt": "0x00C8670A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C8678601",
+ "UMaskExt": "0x00C86786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C8668601",
+ "UMaskExt": "0x00C86686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C8670601",
+ "UMaskExt": "0x00C86706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "UMask": "0x00C86FFF01",
+ "UMaskExt": "0x00C86FFF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "UMask": "0x00C86FFE01",
+ "UMaskExt": "0x00C86FFE",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C86F8A01",
+ "UMaskExt": "0x00C86F8A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C86E8A01",
+ "UMaskExt": "0x00C86E8A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00C86F0A01",
+ "UMaskExt": "0x00C86F0A",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C86F8601",
+ "UMaskExt": "0x00C86F86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C86E8601",
+ "UMaskExt": "0x00C86E86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00C86F0601",
+ "UMaskExt": "0x00C86F06",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x00CC23FF04",
+ "UMaskExt": "0x00CC23FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x00C8C3FF04",
+ "UMaskExt": "0x00C8C3FF",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8168a01",
+ "UMaskExt": "0x00c8168a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8170a01",
+ "UMaskExt": "0x00c8170a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8168601",
+ "UMaskExt": "0x00c81686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8170601",
+ "UMaskExt": "0x00c81706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8978a01",
+ "UMaskExt": "0x00c8978a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8968a01",
+ "UMaskExt": "0x00c8968a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8970a01",
+ "UMaskExt": "0x00c8970a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8978601",
+ "UMaskExt": "0x00c89786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8968601",
+ "UMaskExt": "0x00c89686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8970601",
+ "UMaskExt": "0x00c89706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c80efe01",
+ "UMaskExt": "0x00c80efe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c80f7e01",
+ "UMaskExt": "0x00c80f7e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x00c88efe01",
+ "UMaskExt": "0x00c88efe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x00c88f7e01",
+ "UMaskExt": "0x00c88f7e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x00c8c7ff01",
+ "UMaskExt": "0x00c8c7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "UMask": "0x00c8d7ff01",
+ "UMaskExt": "0x00c8d7ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0x00cd47ff01",
+ "UMaskExt": "0x00cd47ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc57ff01",
+ "UMaskExt": "0x00cc57ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x00cc27ff01",
+ "UMaskExt": "0x00cc27ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc47ff01",
+ "UMaskExt": "0x00cc47ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc47fd01",
+ "UMaskExt": "0x00cc47fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "UMask": "0x00cc47fe01",
+ "UMaskExt": "0x00cc47fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "UMask": "0x00c877de01",
+ "UMaskExt": "0x00c877de",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "UMask": "0x00c87fde01",
+ "UMaskExt": "0x00c87fde",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "UMask": "0x00c867ff01",
+ "UMaskExt": "0x00c867ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "UMask": "0x00c867fe01",
+ "UMaskExt": "0x00c867fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8678a01",
+ "UMaskExt": "0x00c8678a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8668a01",
+ "UMaskExt": "0x00c8668a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c8670a01",
+ "UMaskExt": "0x00c8670a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8678601",
+ "UMaskExt": "0x00c86786",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8668601",
+ "UMaskExt": "0x00c86686",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c8670601",
+ "UMaskExt": "0x00c86706",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "UMask": "0x00c86fff01",
+ "UMaskExt": "0x00c86fff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "UMask": "0x00c86ffe01",
+ "UMaskExt": "0x00c86ffe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c86f8a01",
+ "UMaskExt": "0x00c86f8a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c86e8a01",
+ "UMaskExt": "0x00c86e8a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "UMask": "0x00c86f0a01",
+ "UMaskExt": "0x00c86f0a",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c86f8601",
+ "UMaskExt": "0x00c86f86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c86e8601",
+ "UMaskExt": "0x00c86e86",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "UMask": "0x00c86f0601",
+ "UMaskExt": "0x00c86f06",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x00cc23ff04",
+ "UMaskExt": "0x00cc23ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "UMask": "0x00c8c3ff04",
+ "UMaskExt": "0x00c8c3ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0x00cd43fd04",
+ "UMaskExt": "0x00cd43fd",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "UMask": "0x00cd43fe04",
+ "UMaskExt": "0x00cd43fe",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PMM Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000008",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PMM Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000008",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DDR Access",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DDR Access",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000004",
+ "Unit": "CHA"
+ }
+]
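
A note on the CHA TOR encodings above, observable directly in the JSON itself rather than taken from any external reference: every "UMask" value is the entry's "UMaskExt" shifted left by one byte with a requestor-select byte ORed into the low bits -- 0x01 for events scoped to iA cores (IA_*) and 0x04 for IO-device events (IO_*). A few of the trailing entries (PMM/DDR) carry only "UMaskExt". A minimal Python sketch that checks this invariant over a parsed copy of the file (the path is a placeholder):

import json

# Invariant observed in the entries above:
#   UMask == (UMaskExt << 8) | sel
# where sel is 0x01 for iA-core (IA_*) events and 0x04 for IO (IO_*) events.
REQUESTOR_SEL = (0x01, 0x04)

def check_tor_umasks(path="uncore-other.json"):  # placeholder path
    with open(path) as f:
        events = json.load(f)
    for ev in events:
        name = ev.get("EventName", "")
        if not name.startswith("UNC_CHA_TOR_") or "UMask" not in ev:
            continue  # some entries carry only UMaskExt
        umask = int(ev["UMask"], 16)
        ext = int(ev.get("UMaskExt", "0x0"), 16)
        assert umask >> 8 == ext and (umask & 0xFF) in REQUESTOR_SEL, name

Once the jevents build step has consumed these files, each entry becomes a named alias, so the events should be addressable by their lowercased EventName in perf (for example, unc_cha_tor_inserts.ia_miss_drd_local_pmm).
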
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
new file mode 100644
index 000000000000..6299afe544cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
@@ -0,0 +1,12 @@
+[
+ {
+ "BriefDescription": "PCU PCLK Clockticks",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x01",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMaskExt": "0x00000000",
+ "Unit": "PCU"
+ }
+]
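
Together with the TOR pairs in the previous file, these counts support a standard Little's-law reduction: occupancy divided by inserts gives the mean number of cycles a request of that class spends in the TOR, and occupancy divided by a clock count gives the mean number of such entries outstanding per cycle. A hedged Python sketch under those assumptions; strictly, the CHA's own clockticks event (not the PCU clock above) is the right time base for per-cycle occupancy, and neither the collection step nor that event is part of this hunk:

def tor_littles_law(occupancy, inserts, clockticks):
    """Derived TOR metrics for one request class over one interval.

    occupancy  -- raw count of UNC_CHA_TOR_OCCUPANCY.<class>
    inserts    -- raw count of UNC_CHA_TOR_INSERTS.<class>
    clockticks -- uncore clock count over the same interval (assumed
                  collected separately, ideally the CHA clock)
    """
    avg_residency_cycles = occupancy / inserts if inserts else 0.0
    avg_outstanding = occupancy / clockticks if clockticks else 0.0
    return avg_residency_cycles, avg_outstanding
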
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
new file mode 100644
index 000000000000..cba69368308e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
@@ -0,0 +1,225 @@
+[
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "CounterMask": "1",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ }
+]
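
The WALK_PENDING / WALK_COMPLETED / WALK_ACTIVE triples above compose the same way for each TLB: WALK_PENDING sums the walks outstanding in the PMH each cycle, so dividing it by WALK_COMPLETED yields the mean page-walk duration in cycles, while WALK_ACTIVE over total core cycles yields the fraction of time at least one walk was in flight. A sketch under those assumptions; the core-cycles count comes from an event that is not part of this file:

def page_walk_metrics(walk_pending, walk_completed, walk_active, core_cycles):
    """Derived page-walk metrics for one TLB (DTLB load/store or ITLB).

    walk_pending   -- *_MISSES.WALK_PENDING (outstanding walks summed each cycle)
    walk_completed -- *_MISSES.WALK_COMPLETED (walks that finished)
    walk_active    -- *_MISSES.WALK_ACTIVE (cycles with >= 1 walk in flight)
    core_cycles    -- unhalted core cycles over the same interval (assumed)
    """
    avg_walk_cycles = walk_pending / walk_completed if walk_completed else 0.0
    active_fraction = walk_active / core_cycles if core_cycles else 0.0
    return avg_walk_cycles, active_fraction
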
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index c5d9a4ed10d7..c3183819bf52 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -701,7 +701,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -714,7 +713,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC01C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -727,7 +725,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -740,7 +737,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4001C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -753,7 +749,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -766,7 +761,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -779,7 +773,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -792,7 +785,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x401C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -805,7 +797,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -818,7 +809,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -831,7 +821,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -844,7 +833,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -857,7 +845,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -870,7 +857,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -883,7 +869,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -896,7 +881,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -909,7 +893,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -922,7 +905,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -935,7 +917,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -948,7 +929,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -961,7 +941,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -974,7 +953,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -987,7 +965,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1000,7 +977,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1013,7 +989,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1026,7 +1001,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1039,7 +1013,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1052,7 +1025,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1065,7 +1037,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1078,7 +1049,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1091,7 +1061,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1104,7 +1073,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1117,7 +1085,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1130,7 +1097,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1143,7 +1109,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1156,7 +1121,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1169,7 +1133,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1182,7 +1145,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1195,7 +1157,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1208,7 +1169,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1221,7 +1181,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1234,7 +1193,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1247,7 +1205,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1260,7 +1217,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1273,7 +1229,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC01C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1286,7 +1241,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1299,7 +1253,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4001C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1312,7 +1265,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1325,7 +1277,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1338,7 +1289,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1351,7 +1301,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x401C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1364,7 +1313,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1377,7 +1325,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1390,7 +1337,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1403,7 +1349,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1416,7 +1361,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1429,7 +1373,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1442,7 +1385,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1455,7 +1397,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1468,7 +1409,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1481,7 +1421,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1494,7 +1433,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1507,7 +1445,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1520,7 +1457,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1533,7 +1469,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1546,7 +1481,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1559,7 +1493,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1572,7 +1505,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1585,7 +1517,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1598,7 +1529,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1611,7 +1541,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1624,7 +1553,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1637,7 +1565,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1650,7 +1577,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1663,7 +1589,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1676,7 +1601,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1689,7 +1613,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1702,7 +1625,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1715,7 +1637,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1728,7 +1649,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1741,7 +1661,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1754,7 +1673,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1767,7 +1685,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1780,7 +1697,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1793,7 +1709,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1806,7 +1721,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1819,7 +1733,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1832,7 +1745,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC01C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1845,7 +1757,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1858,7 +1769,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4001C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1871,7 +1781,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1884,7 +1793,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1897,7 +1805,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1910,7 +1817,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x401C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1923,7 +1829,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1936,7 +1841,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1949,7 +1853,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1962,7 +1865,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1975,7 +1877,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1988,7 +1889,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2001,7 +1901,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2014,7 +1913,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2027,7 +1925,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2040,7 +1937,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2053,7 +1949,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2066,7 +1961,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2079,7 +1973,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2092,7 +1985,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2105,7 +1997,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2118,7 +2009,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2131,7 +2021,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2144,7 +2033,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2157,7 +2045,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2170,7 +2057,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2183,7 +2069,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2196,7 +2081,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2209,7 +2093,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2222,7 +2105,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2235,7 +2117,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2248,7 +2129,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2261,7 +2141,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2274,7 +2153,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2287,7 +2165,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2300,7 +2177,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2313,7 +2189,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2326,7 +2201,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2339,7 +2213,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2352,7 +2225,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2365,7 +2237,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2378,7 +2249,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x18000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2391,7 +2261,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC01C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2404,7 +2273,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2417,7 +2285,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4001C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2430,7 +2297,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2001C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2443,7 +2309,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x801C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2456,7 +2321,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1001C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2469,7 +2333,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x401C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2482,7 +2345,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2495,7 +2357,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2508,7 +2369,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2521,7 +2381,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2534,7 +2393,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2547,7 +2405,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2560,7 +2417,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2573,7 +2429,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2586,7 +2441,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2599,7 +2453,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2612,7 +2465,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2625,7 +2477,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2638,7 +2489,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2651,7 +2501,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2664,7 +2513,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2677,7 +2525,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2690,7 +2537,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2703,7 +2549,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2716,7 +2561,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2729,7 +2573,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2742,7 +2585,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2755,7 +2597,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2768,7 +2609,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2781,7 +2621,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2794,7 +2633,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2807,7 +2645,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2820,7 +2657,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2833,7 +2669,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2846,7 +2681,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC0028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2859,7 +2693,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2872,7 +2705,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x400028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2885,7 +2717,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x200028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2898,7 +2729,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x80028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2911,7 +2741,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x100028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2924,7 +2753,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x40028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -2974,4 +2802,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index 8500fc65e0e8..74ea4ccb4c9a 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -275,7 +275,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x20001C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -288,7 +287,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000080004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -301,7 +299,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000040004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -314,7 +311,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000100004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -327,7 +323,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFC400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -340,7 +335,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -353,7 +347,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -366,7 +359,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -379,7 +371,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -392,7 +383,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x203C400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -405,7 +395,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -418,7 +407,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x7C400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -431,7 +419,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC4000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -444,7 +431,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -457,7 +443,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -470,7 +455,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -483,7 +467,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -496,7 +479,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2004000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -509,7 +491,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -522,7 +503,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x44000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -535,7 +515,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000400004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -548,7 +527,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000020004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -561,7 +539,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x20001C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -574,7 +551,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000080001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -587,7 +563,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000040001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -600,7 +575,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000100001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -613,7 +587,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFC400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -626,7 +599,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -639,7 +611,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -652,7 +623,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -665,7 +635,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -678,7 +647,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x203C400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -691,7 +659,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -704,7 +671,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x7C400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -717,7 +683,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC4000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -730,7 +695,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -743,7 +707,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -756,7 +719,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -769,7 +731,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -782,7 +743,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2004000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -795,7 +755,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -808,7 +767,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x44000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -821,7 +779,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000400001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -834,7 +791,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000020001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -847,7 +803,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x20001C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -860,7 +815,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000080002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -873,7 +827,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000040002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -886,7 +839,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000100002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -899,7 +851,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFC400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -912,7 +863,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -925,7 +875,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -938,7 +887,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -951,7 +899,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -964,7 +911,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x203C400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -977,7 +923,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -990,7 +935,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x7C400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1003,7 +947,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC4000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1016,7 +959,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1029,7 +971,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1042,7 +983,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1055,7 +995,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1068,7 +1007,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2004000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1081,7 +1019,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1094,7 +1031,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x44000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1107,7 +1043,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000400002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1120,7 +1055,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000020002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1133,7 +1067,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x20001C8000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1146,7 +1079,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000088000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1159,7 +1091,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000048000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1172,7 +1103,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000108000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1185,7 +1115,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFC408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1198,7 +1127,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103C408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1211,7 +1139,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x43C408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1224,7 +1151,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x23C408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1237,7 +1163,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0xBC408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1250,7 +1175,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x203C408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1263,7 +1187,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x13C408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1276,7 +1199,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x7C408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1289,7 +1211,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FC4008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1302,7 +1223,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1004008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1315,7 +1235,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x404008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1328,7 +1247,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x204008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1341,7 +1259,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x84008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1354,7 +1271,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2004008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1367,7 +1283,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x104008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1380,7 +1295,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x44008000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1393,7 +1307,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000408000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1406,7 +1319,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x2000028000",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1608,4 +1520,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index 12eabae3e224..79fda10ec4bb 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -417,6 +417,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
"CounterHTOff": "Fixed counter 0",
@@ -969,7 +979,7 @@
"BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "10",
+ "CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"Invert": "1",
@@ -977,4 +987,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index 6639e18a7068..e21010c0df41 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -750,7 +750,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -763,7 +762,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -776,7 +774,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -789,7 +786,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -802,7 +798,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -815,7 +810,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -828,7 +822,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -841,7 +834,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -854,7 +846,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -867,7 +858,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -880,7 +870,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -893,7 +882,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -906,7 +894,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -919,7 +906,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -932,7 +918,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -945,7 +930,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -958,7 +942,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -971,7 +954,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -984,7 +966,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -997,7 +978,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1010,7 +990,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1023,7 +1002,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1036,7 +1014,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1049,7 +1026,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1062,7 +1038,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1075,7 +1050,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1088,7 +1062,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1101,7 +1074,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1114,7 +1086,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1127,7 +1098,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1140,7 +1110,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1153,7 +1122,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1166,7 +1134,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1179,7 +1146,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1192,7 +1158,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1205,7 +1170,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1218,7 +1182,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1231,7 +1194,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1244,7 +1206,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1257,7 +1218,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1270,7 +1230,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1283,7 +1242,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1296,7 +1254,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1309,7 +1266,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1322,7 +1278,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1335,7 +1290,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1348,7 +1302,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1361,7 +1314,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1374,7 +1326,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1387,7 +1338,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1400,7 +1350,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1413,7 +1362,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1426,7 +1374,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1439,7 +1386,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1452,7 +1398,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1465,7 +1410,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1478,7 +1422,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1491,7 +1434,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1504,7 +1446,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1517,7 +1458,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1530,7 +1470,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1543,7 +1482,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1556,7 +1494,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1569,7 +1506,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1582,7 +1518,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1595,7 +1530,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1608,7 +1542,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1621,7 +1554,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1634,7 +1566,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x10003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1647,7 +1578,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x4003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1660,7 +1590,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1673,7 +1602,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x8003C0100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1723,4 +1651,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index 60c286b4fe54..a570fe3e7a2d 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -275,7 +275,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -288,7 +287,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -301,7 +299,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -314,7 +311,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -327,7 +323,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -340,7 +335,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800491",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -353,7 +347,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -366,7 +359,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -379,7 +371,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -392,7 +383,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -405,7 +395,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -418,7 +407,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800490",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -431,7 +419,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -444,7 +431,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -457,7 +443,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -470,7 +455,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -483,7 +467,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -496,7 +479,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800120",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -509,7 +491,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -522,7 +503,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -535,7 +515,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -548,7 +527,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -561,7 +539,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -574,7 +551,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800122",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -587,7 +563,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -600,7 +575,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -613,7 +587,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -626,7 +599,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -639,7 +611,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -652,7 +623,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800004",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -665,7 +635,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -678,7 +647,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -691,7 +659,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -704,7 +671,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -717,7 +683,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -730,7 +695,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800001",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -743,7 +707,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -756,7 +719,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -769,7 +731,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -782,7 +743,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -795,7 +755,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -808,7 +767,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800002",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -821,7 +779,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -834,7 +791,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -847,7 +803,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -860,7 +815,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -873,7 +827,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -886,7 +839,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800400",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -899,7 +851,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -912,7 +863,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -925,7 +875,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -938,7 +887,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -951,7 +899,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -964,7 +911,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800010",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -977,7 +923,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -990,7 +935,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1003,7 +947,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1016,7 +959,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1029,7 +971,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1042,7 +983,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800020",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1055,7 +995,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1068,7 +1007,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1081,7 +1019,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1094,7 +1031,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1107,7 +1043,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1120,7 +1055,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800080",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1133,7 +1067,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FBC000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1146,7 +1079,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x103FC00100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1159,7 +1091,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x83FC00100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1172,7 +1103,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63FC00100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1185,7 +1115,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x604000100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1198,7 +1127,6 @@
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x63B800100",
"Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
@@ -1400,4 +1328,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x40"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 12eabae3e224..79fda10ec4bb 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -417,6 +417,16 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "Counter": "0,1,2,3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Instructions retired from execution.",
"Counter": "Fixed counter 0",
"CounterHTOff": "Fixed counter 0",
@@ -969,7 +979,7 @@
"BriefDescription": "Cycles with less than 10 actually retired uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "10",
+ "CounterMask": "16",
"EventCode": "0xC2",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"Invert": "1",
@@ -977,4 +987,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
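For orientation, the INST_DECODED.DECODERS entry added above can be exercised outside of perf(1) through the raw event interface. A minimal self-monitoring sketch in C follows, assuming the conventional Intel core-PMU encoding of attr.config (event select in bits 0-7, umask in bits 8-15); it is illustrative only and not part of the patch:

/* Minimal sketch: open INST_DECODED.DECODERS (EventCode 0x55, UMask 0x1)
 * as a raw event on the calling thread and read the count once.
 * Error handling is kept to the bare minimum. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    struct perf_event_attr attr;
    uint64_t count;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_RAW;
    attr.config = (0x1ULL << 8) | 0x55; /* UMask << 8 | EventCode */
    attr.disabled = 1;
    attr.exclude_kernel = 1;

    fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
                 -1 /* any cpu */, -1 /* no group */, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    /* ... run the workload of interest here ... */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("INST_DECODED.DECODERS: %llu\n", (unsigned long long)count);
    close(fd);
    return 0;
}

Fields such as SampleAfterValue and CounterHTOff in the JSON are metadata consumed when jevents compiles these files into perf's built-in event tables; they do not appear in the syscall interface.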
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
index 0b66e6af8177..4dcbac887380 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
@@ -10,6 +10,16 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "read requests to memory controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -20,6 +30,16 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "write requests to memory controller",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Memory controller clock ticks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
index 06c5ca26ca3f..aa0f67613c4a 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
@@ -17,6 +17,16 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) ",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -27,6 +37,16 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "MMIO reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40040e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -37,6 +57,16 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "MMIO writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40041e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -48,6 +78,17 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Streaming stores (full cache line)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x41833",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"EventCode": "0x35",
@@ -59,6 +100,17 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Streaming stores (partial cache line)",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x41a33",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "read requests from home agent",
"Counter": "0,1,2,3",
"EventCode": "0x50",
@@ -114,6 +166,16 @@
"Unit": "UPI LL"
},
{
+ "BriefDescription": "UPI interconnect send bandwidth for payload",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI LL"
+ },
+ {
"BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
"Counter": "0,1",
"EventCode": "0x83",
@@ -177,6 +239,21 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "PCI Express bandwidth reading at IIO",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
"Counter": "0,1",
"EventCode": "0x83",
@@ -240,6 +317,21 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "PCI Express bandwidth writing at IIO",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
"Counter": "0,1,2,3",
"EventCode": "0x33",
@@ -514,7 +606,7 @@
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to it's home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to it's home socket to be written back to memory.",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to its home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in &gt;= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to its home socket to be written back to memory.",
"UMask": "0x20",
"Unit": "CHA"
},
@@ -524,7 +616,7 @@
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This reponse will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to its home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
"UMask": "0x10",
"Unit": "CHA"
},
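On the IIO aliases above (a reading of how jevents consumes these fields, not wording from the patch): each entry programs only PART0, but its MetricExpr sums PART0 through PART3, so the reported LLC_MISSES.PCIE_READ / LLC_MISSES.PCIE_WRITE metric aggregates what are typically the four x4 ports behind one x16 stack, and the ScaleUnit of "4Bytes" converts the DWORD-granularity counts into bytes.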
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/other.json b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
index 304cd09fe159..65539490e18f 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/other.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
@@ -1,16 +1,5 @@
[
{
- "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3,4,5,6,7",
- "EventCode": "0xc1",
- "EventName": "ASSISTS.ANY",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
- "SampleAfterValue": "100003",
- "UMask": "0x7"
- },
- {
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -57,4 +46,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
index d436775c80db..a8aa1b455c77 100644
--- a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
@@ -12,6 +12,17 @@
"UMask": "0x9"
},
{
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3,4,5,6,7",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
"BriefDescription": "All branch instructions retired.",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3,4,5,6,7",
@@ -1055,4 +1066,4 @@
"SampleAfterValue": "1000003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/other.json b/tools/perf/pmu-events/arch/x86/tremontx/other.json
index 4f20f45a4898..2766e9dfc325 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/other.json
+++ b/tools/perf/pmu-events/arch/x86/tremontx/other.json
@@ -1,16 +1,5 @@
[
{
- "BriefDescription": "Counts the total number of BTCLEARS.",
- "CollectPEBSRecord": "2",
- "Counter": "0,1,2,3",
- "EventCode": "0xe8",
- "EventName": "BTCLEAR.ANY",
- "PDIR_COUNTER": "na",
- "PEBScounters": "0,1,2,3",
- "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
- "SampleAfterValue": "200003"
- },
- {
"BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.SELF_LOCKS",
"CollectPEBSRecord": "2",
"Counter": "0,1,2,3",
@@ -683,4 +672,4 @@
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json b/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
index 0a77e9f9a16a..38dc8044767b 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
@@ -165,6 +165,17 @@
"UMask": "0xfe"
},
{
+ "BriefDescription": "Counts the total number of BTCLEARS.",
+ "CollectPEBSRecord": "2",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xe8",
+ "EventName": "BTCLEAR.ANY",
+ "PDIR_COUNTER": "na",
+ "PEBScounters": "0,1,2,3",
+ "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "200003"
+ },
+ {
"BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
"CollectPEBSRecord": "2",
"Counter": "Fixed counter 1",
@@ -671,4 +682,4 @@
"SampleAfterValue": "2000003",
"UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
index 0d342efae154..b7ff25a5d717 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
@@ -11,6 +11,17 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "read requests to memory controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x0f",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -22,6 +33,17 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "write requests to memory controller",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Memory controller clock ticks",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json b/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
index 0f73582248f9..5194ce1b4390 100644
--- a/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
@@ -20,6 +20,18 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) ",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40e33",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -32,6 +44,18 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "MMIO reads",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40040e33",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -44,6 +68,18 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "MMIO writes",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x40041e33",
+ "PerPkg": "1",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -57,6 +93,19 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Streaming stores (full cache line)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x41833",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -70,6 +119,19 @@
"Unit": "CHA"
},
{
+ "BriefDescription": "Streaming stores (partial cache line)",
+ "Counter": "0,1,2,3",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Filter": "config1=0x41a33",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC001FE01",
+ "UMaskExt": "0xC001FE",
+ "Unit": "CHA"
+ },
+ {
"BriefDescription": "read requests from home agent",
"Counter": "0,1,2,3",
"CounterType": "PGMABLE",
@@ -106,6 +168,22 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "PCI Express bandwidth reading at IIO",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
"Counter": "0,1",
"CounterType": "PGMABLE",
@@ -122,6 +200,22 @@
"Unit": "IIO"
},
{
+ "BriefDescription": "PCI Express bandwidth writing at IIO",
+ "Counter": "0,1",
+ "CounterType": "PGMABLE",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
"BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
"Counter": "0,1",
"CounterType": "PGMABLE",
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
index 23dcd554728c..67bc34984fa8 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
@@ -1,29 +1,5 @@
[
{
- "BriefDescription": "Early Branch Prediciton Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.EARLY",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Late Branch Prediction Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.LATE",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Branch prediction unit missed call or return",
- "Counter": "0,1,2,3",
- "EventCode": "0xE5",
- "EventName": "BPU_MISSED_CALL_RET",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "ES segment renames",
"Counter": "0,1,2,3",
"EventCode": "0xD5",
@@ -128,46 +104,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "All RAT stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ANY",
- "SampleAfterValue": "2000000",
- "UMask": "0xf"
- },
- {
- "BriefDescription": "Flag stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.FLAGS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Partial register stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.REGISTERS",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "ROB read port stalls cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ROB_READ_PORT",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Scoreboard stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.SCOREBOARD",
- "SampleAfterValue": "2000000",
- "UMask": "0x8"
- },
- {
"BriefDescription": "All Store buffer stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -284,4 +220,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
index 10140f460fbb..403fb2b87fc4 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
@@ -51,6 +51,30 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE5",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Branch instructions decoded",
"Counter": "0,1,2,3",
"EventCode": "0xE0",
@@ -495,6 +519,46 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Resource related stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0xA2",
@@ -896,4 +960,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
index 2ecd80f8fa67..c5f33fe2a3ce 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
@@ -1769,7 +1769,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
@@ -1780,7 +1780,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
@@ -1791,7 +1791,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
@@ -1802,7 +1802,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
@@ -1857,7 +1857,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
@@ -1868,7 +1868,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
@@ -3230,4 +3230,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
index 623a0087c8f3..f14e760a9ddc 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
@@ -286,7 +286,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
@@ -297,7 +297,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
"Counter": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
@@ -736,4 +736,4 @@
"SampleAfterValue": "100000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
index 23dcd554728c..67bc34984fa8 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
@@ -1,29 +1,5 @@
[
{
- "BriefDescription": "Early Branch Prediciton Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.EARLY",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Late Branch Prediction Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.LATE",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Branch prediction unit missed call or return",
- "Counter": "0,1,2,3",
- "EventCode": "0xE5",
- "EventName": "BPU_MISSED_CALL_RET",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "ES segment renames",
"Counter": "0,1,2,3",
"EventCode": "0xD5",
@@ -128,46 +104,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "All RAT stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ANY",
- "SampleAfterValue": "2000000",
- "UMask": "0xf"
- },
- {
- "BriefDescription": "Flag stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.FLAGS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Partial register stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.REGISTERS",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "ROB read port stalls cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ROB_READ_PORT",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Scoreboard stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.SCOREBOARD",
- "SampleAfterValue": "2000000",
- "UMask": "0x8"
- },
- {
"BriefDescription": "All Store buffer stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -284,4 +220,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
index 10140f460fbb..403fb2b87fc4 100644
--- a/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
@@ -51,6 +51,30 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE5",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Branch instructions decoded",
"Counter": "0,1,2,3",
"EventCode": "0xE0",
@@ -495,6 +519,46 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Resource related stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0xA2",
@@ -896,4 +960,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/cache.json b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
index 23de93ea347a..d6243d008bfe 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
@@ -1761,7 +1761,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
@@ -1772,7 +1772,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
@@ -1783,7 +1783,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
@@ -1794,7 +1794,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
@@ -1849,7 +1849,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
@@ -1860,7 +1860,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
@@ -3222,4 +3222,4 @@
"SampleAfterValue": "200000",
"UMask": "0x8"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/memory.json b/tools/perf/pmu-events/arch/x86/westmereex/memory.json
index a2132858b9c1..1f8cfabe08c0 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/memory.json
@@ -294,7 +294,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
@@ -305,7 +305,7 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
"Counter": "2",
"EventCode": "0xB7",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
@@ -744,4 +744,4 @@
"SampleAfterValue": "100000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/other.json b/tools/perf/pmu-events/arch/x86/westmereex/other.json
index 23dcd554728c..67bc34984fa8 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/other.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/other.json
@@ -1,29 +1,5 @@
[
{
- "BriefDescription": "Early Branch Prediciton Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.EARLY",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Late Branch Prediction Unit clears",
- "Counter": "0,1,2,3",
- "EventCode": "0xE8",
- "EventName": "BPU_CLEARS.LATE",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Branch prediction unit missed call or return",
- "Counter": "0,1,2,3",
- "EventCode": "0xE5",
- "EventName": "BPU_MISSED_CALL_RET",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
"BriefDescription": "ES segment renames",
"Counter": "0,1,2,3",
"EventCode": "0xD5",
@@ -128,46 +104,6 @@
"UMask": "0x1"
},
{
- "BriefDescription": "All RAT stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ANY",
- "SampleAfterValue": "2000000",
- "UMask": "0xf"
- },
- {
- "BriefDescription": "Flag stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.FLAGS",
- "SampleAfterValue": "2000000",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Partial register stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.REGISTERS",
- "SampleAfterValue": "2000000",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "ROB read port stalls cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.ROB_READ_PORT",
- "SampleAfterValue": "2000000",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Scoreboard stall cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xD2",
- "EventName": "RAT_STALLS.SCOREBOARD",
- "SampleAfterValue": "2000000",
- "UMask": "0x8"
- },
- {
"BriefDescription": "All Store buffer stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
@@ -284,4 +220,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
index 620d9084d860..7d6c2c1e0db0 100644
--- a/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
@@ -51,6 +51,30 @@
"UMask": "0x1"
},
{
+ "BriefDescription": "Early Branch Prediciton Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Late Branch Prediction Unit clears",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Branch prediction unit missed call or return",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE5",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Branch instructions decoded",
"Counter": "0,1,2,3",
"EventCode": "0xE0",
@@ -495,6 +519,46 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "All RAT stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Flag stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Partial register stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ROB read port stalls cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Scoreboard stall cycles",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Resource related stall cycles",
"Counter": "0,1,2,3",
"EventCode": "0xA2",
@@ -894,4 +958,4 @@
"SampleAfterValue": "2000000",
"UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 159d9eab6e79..e597e4bac90f 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -207,21 +207,6 @@ static struct msrmap {
{ NULL, NULL }
};
-static struct field {
- const char *field;
- const char *kernel;
-} fields[] = {
- { "UMask", "umask=" },
- { "CounterMask", "cmask=" },
- { "Invert", "inv=" },
- { "AnyThread", "any=" },
- { "EdgeDetect", "edge=" },
- { "SampleAfterValue", "period=" },
- { "FCMask", "fc_mask=" },
- { "PortMask", "ch_mask=" },
- { NULL, NULL }
-};
-
static void cut_comma(char *map, jsmntok_t *newval)
{
int i;
@@ -233,21 +218,6 @@ static void cut_comma(char *map, jsmntok_t *newval)
}
}
-static int match_field(char *map, jsmntok_t *field, int nz,
- char **event, jsmntok_t *val)
-{
- struct field *f;
- jsmntok_t newval = *val;
-
- for (f = fields; f->field; f++)
- if (json_streq(map, field, f->field) && nz) {
- cut_comma(map, &newval);
- addfield(map, event, ",", f->kernel, &newval);
- return 1;
- }
- return 0;
-}
-
static struct msrmap *lookup_msr(char *map, jsmntok_t *val)
{
jsmntok_t newval = *val;
@@ -581,6 +551,14 @@ static int json_events(const char *fn,
jsmntok_t *precise = NULL;
jsmntok_t *obj = tok++;
bool configcode_present = false;
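+ /*
+  * Collect the optional event parameters separately so they can be
+  * emitted below in a fixed, alphabetical order regardless of their
+  * order in the JSON input.
+  */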
+ char *umask = NULL;
+ char *cmask = NULL;
+ char *inv = NULL;
+ char *any = NULL;
+ char *edge = NULL;
+ char *period = NULL;
+ char *fc_mask = NULL;
+ char *ch_mask = NULL;
EXPECT(obj->type == JSMN_OBJECT, obj, "expected object");
for (j = 0; j < obj->size; j += 2) {
@@ -596,8 +574,23 @@ static int json_events(const char *fn,
"Expected string value");
nz = !json_streq(map, val, "0");
- if (match_field(map, field, nz, &event, val)) {
- /* ok */
+ /* match_field */
+ if (json_streq(map, field, "UMask") && nz) {
+ addfield(map, &umask, "", "umask=", val);
+ } else if (json_streq(map, field, "CounterMask") && nz) {
+ addfield(map, &cmask, "", "cmask=", val);
+ } else if (json_streq(map, field, "Invert") && nz) {
+ addfield(map, &inv, "", "inv=", val);
+ } else if (json_streq(map, field, "AnyThread") && nz) {
+ addfield(map, &any, "", "any=", val);
+ } else if (json_streq(map, field, "EdgeDetect") && nz) {
+ addfield(map, &edge, "", "edge=", val);
+ } else if (json_streq(map, field, "SampleAfterValue") && nz) {
+ addfield(map, &period, "", "period=", val);
+ } else if (json_streq(map, field, "FCMask") && nz) {
+ addfield(map, &fc_mask, "", "fc_mask=", val);
+ } else if (json_streq(map, field, "PortMask") && nz) {
+ addfield(map, &ch_mask, "", "ch_mask=", val);
} else if (json_streq(map, field, "EventCode")) {
char *code = NULL;
addfield(map, &code, "", "", val);
@@ -612,7 +605,7 @@ static int json_events(const char *fn,
} else if (json_streq(map, field, "ExtSel")) {
char *code = NULL;
addfield(map, &code, "", "", val);
- eventcode |= strtoul(code, NULL, 0) << 21;
+ eventcode |= strtoul(code, NULL, 0) << 8;
free(code);
} else if (json_streq(map, field, "EventName")) {
addfield(map, &je.name, "", "", val);
@@ -652,9 +645,6 @@ static int json_events(const char *fn,
for (s = je.pmu; *s; s++)
*s = tolower(*s);
}
- addfield(map, &je.desc, ". ", "Unit: ", NULL);
- addfield(map, &je.desc, "", je.pmu, NULL);
- addfield(map, &je.desc, "", " ", NULL);
} else if (json_streq(map, field, "Filter")) {
addfield(map, &filter, "", "", val);
} else if (json_streq(map, field, "ScaleUnit")) {
@@ -693,10 +683,32 @@ static int json_events(const char *fn,
else
snprintf(buf, sizeof buf, "event=%#llx", eventcode);
addfield(map, &event, ",", buf, NULL);
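+ /* Emit the collected parameters in alphabetical order. */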
+ if (any)
+ addfield(map, &event, ",", any, NULL);
+ if (ch_mask)
+ addfield(map, &event, ",", ch_mask, NULL);
+ if (cmask)
+ addfield(map, &event, ",", cmask, NULL);
+ if (edge)
+ addfield(map, &event, ",", edge, NULL);
+ if (fc_mask)
+ addfield(map, &event, ",", fc_mask, NULL);
+ if (inv)
+ addfield(map, &event, ",", inv, NULL);
+ if (period)
+ addfield(map, &event, ",", period, NULL);
+ if (umask)
+ addfield(map, &event, ",", umask, NULL);
+
if (je.desc && extra_desc)
addfield(map, &je.desc, " ", extra_desc, NULL);
if (je.long_desc && extra_desc)
addfield(map, &je.long_desc, " ", extra_desc, NULL);
+ if (je.pmu) {
+ addfield(map, &je.desc, ". ", "Unit: ", NULL);
+ addfield(map, &je.desc, "", je.pmu, NULL);
+ addfield(map, &je.desc, "", " ", NULL);
+ }
if (filter)
addfield(map, &event, ",", filter, NULL);
if (msr != NULL)
@@ -716,6 +728,14 @@ static int json_events(const char *fn,
je.event = real_event(je.name, event);
err = func(data, &je);
free_strings:
+ free(umask);
+ free(cmask);
+ free(inv);
+ free(any);
+ free(edge);
+ free(period);
+ free(fc_mask);
+ free(ch_mask);
free(event);
free(je.desc);
free(je.name);
diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py
new file mode 100755
index 000000000000..4339692a8d0b
--- /dev/null
+++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py
@@ -0,0 +1,274 @@
+# SPDX-License-Identifier: GPL-2.0
+# arm-cs-trace-disasm.py: ARM CoreSight Trace Dump With Disassembler
+#
+# Author: Tor Jeremiassen <tor@ti.com>
+# Mathieu Poirier <mathieu.poirier@linaro.org>
+# Leo Yan <leo.yan@linaro.org>
+# Al Grant <Al.Grant@arm.com>
+
+from __future__ import print_function
+import os
+from os import path
+import sys
+import re
+from subprocess import *
+from optparse import OptionParser, make_option
+
+from perf_trace_context import perf_set_itrace_options, \
+ perf_sample_insn, perf_sample_srccode
+
+# Below are some example commands for using this script.
+#
+# Output disassembly with objdump:
+# perf script -s scripts/python/arm-cs-trace-disasm.py \
+# -- -d objdump -k path/to/vmlinux
+# Output disassembly with llvm-objdump:
+# perf script -s scripts/python/arm-cs-trace-disasm.py \
+# -- -d llvm-objdump-11 -k path/to/vmlinux
+# Output only source line and symbols:
+# perf script -s scripts/python/arm-cs-trace-disasm.py
+
+# Command line parsing.
+option_list = [
+ # formatting options for the bottom entry of the stack
+ make_option("-k", "--vmlinux", dest="vmlinux_name",
+ help="Set path to vmlinux file"),
+ make_option("-d", "--objdump", dest="objdump_name",
+ help="Set path to objdump executable file"),
+ make_option("-v", "--verbose", dest="verbose",
+ action="store_true", default=False,
+ help="Enable debugging log")
+]
+
+parser = OptionParser(option_list=option_list)
+(options, args) = parser.parse_args()
+
+# Initialize global dicts and regular expression
+disasm_cache = dict()
+cpu_data = dict()
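+# cpu_data holds per-CPU state: cpu_data[str(cpu) + 'addr'] is the
+# branch target address of the previous branch sample on that CPU.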
+disasm_re = re.compile("^\s*([0-9a-fA-F]+):")
+disasm_func_re = re.compile("^\s*([0-9a-fA-F]+)\s.*:")
+cache_size = 64*1024
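+# Maximum number of entries kept in disasm_cache before it is cleared
+# (see read_disam below).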
+
+glb_source_file_name = None
+glb_line_number = None
+glb_dso = None
+
+def get_optional(perf_dict, field):
+ if field in perf_dict:
+ return perf_dict[field]
+ return "[unknown]"
+
+def get_offset(perf_dict, field):
+ if field in perf_dict:
+ return "+%#x" % perf_dict[field]
+ return ""
+
+def get_dso_file_path(dso_name, dso_build_id):
+ if (dso_name == "[kernel.kallsyms]" or dso_name == "vmlinux"):
+ if (options.vmlinux_name):
+ return options.vmlinux_name;
+ else:
+ return dso_name
+
+ if (dso_name == "[vdso]") :
+ append = "/vdso"
+ else:
+ append = "/elf"
+
+ dso_path = os.environ['PERF_BUILDID_DIR'] + "/" + dso_name + "/" + dso_build_id + append;
+ # Collapse the first duplicated slash into a single slash
+ dso_path = dso_path.replace('//', '/', 1)
+ return dso_path
+
+def read_disam(dso_fname, dso_start, start_addr, stop_addr):
+ addr_range = str(start_addr) + ":" + str(stop_addr) + ":" + dso_fname
+
+ # Don't let the cache get too big; clear it when it hits the max size
+ if (len(disasm_cache) > cache_size):
+ disasm_cache.clear();
+
+ if addr_range in disasm_cache:
+ disasm_output = disasm_cache[addr_range];
+ else:
+ start_addr = start_addr - dso_start;
+ stop_addr = stop_addr - dso_start;
+ disasm = [ options.objdump_name, "-d", "-z",
+ "--start-address="+format(start_addr,"#x"),
+ "--stop-address="+format(stop_addr,"#x") ]
+ disasm += [ dso_fname ]
+ disasm_output = check_output(disasm).decode('utf-8').split('\n')
+ disasm_cache[addr_range] = disasm_output
+
+ return disasm_output
+
+def print_disam(dso_fname, dso_start, start_addr, stop_addr):
+ for line in read_disam(dso_fname, dso_start, start_addr, stop_addr):
+ m = disasm_func_re.search(line)
+ if m is None:
+ m = disasm_re.search(line)
+ if m is None:
+ continue
+ print("\t" + line)
+
+def print_sample(sample):
+ print("Sample = { cpu: %04d addr: 0x%016x phys_addr: 0x%016x ip: 0x%016x " \
+ "pid: %d tid: %d period: %d time: %d }" % \
+ (sample['cpu'], sample['addr'], sample['phys_addr'], \
+ sample['ip'], sample['pid'], sample['tid'], \
+ sample['period'], sample['time']))
+
+def trace_begin():
+ print('ARM CoreSight Trace Data Assembler Dump')
+
+def trace_end():
+ print('End')
+
+def trace_unhandled(event_name, context, event_fields_dict):
+ print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]))
+
+def common_start_str(comm, sample):
+ sec = int(sample["time"] / 1000000000)
+ ns = sample["time"] % 1000000000
+ cpu = sample["cpu"]
+ pid = sample["pid"]
+ tid = sample["tid"]
+ return "%16s %5u/%-5u [%04u] %9u.%09u " % (comm, pid, tid, cpu, sec, ns)
+
+# This code is copied from intel-pt-events.py for printing source code
+# line and symbols.
+def print_srccode(comm, param_dict, sample, symbol, dso):
+ ip = sample["ip"]
+ if symbol == "[unknown]":
+ start_str = common_start_str(comm, sample) + ("%x" % ip).rjust(16).ljust(40)
+ else:
+ offs = get_offset(param_dict, "symoff")
+ start_str = common_start_str(comm, sample) + (symbol + offs).ljust(40)
+
+ global glb_source_file_name
+ global glb_line_number
+ global glb_dso
+
+ source_file_name, line_number, source_line = perf_sample_srccode(perf_script_context)
+ if source_file_name:
+ if glb_line_number == line_number and glb_source_file_name == source_file_name:
+ src_str = ""
+ else:
+ if len(source_file_name) > 40:
+ src_file = ("..." + source_file_name[-37:]) + " "
+ else:
+ src_file = source_file_name.ljust(41)
+
+ if source_line is None:
+ src_str = src_file + str(line_number).rjust(4) + " <source not found>"
+ else:
+ src_str = src_file + str(line_number).rjust(4) + " " + source_line
+ glb_dso = None
+ elif dso == glb_dso:
+ src_str = ""
+ else:
+ src_str = dso
+ glb_dso = dso
+
+ glb_line_number = line_number
+ glb_source_file_name = source_file_name
+
+ print(start_str, src_str)
+
+def process_event(param_dict):
+ global cache_size
+ global options
+
+ sample = param_dict["sample"]
+ comm = param_dict["comm"]
+
+ name = param_dict["ev_name"]
+ dso = get_optional(param_dict, "dso")
+ dso_bid = get_optional(param_dict, "dso_bid")
+ dso_start = get_optional(param_dict, "dso_map_start")
+ dso_end = get_optional(param_dict, "dso_map_end")
+ symbol = get_optional(param_dict, "symbol")
+
+ if (options.verbose == True):
+ print("Event type: %s" % name)
+ print_sample(sample)
+
+ # If the dso cannot be found, the assembly cannot be dumped; bail out
+ if (dso == '[unknown]'):
+ return
+
+ # Validate dso start and end addresses
+ if ((dso_start == '[unknown]') or (dso_end == '[unknown]')):
+ print("Failed to find valid dso map for dso %s" % dso)
+ return
+
+ if (name[0:12] == "instructions"):
+ print_srccode(comm, param_dict, sample, symbol, dso)
+ return
+
+ # Don't proceed if this event is not a branch sample.
+ if (name[0:8] != "branches"):
+ return
+
+ cpu = sample["cpu"]
+ ip = sample["ip"]
+ addr = sample["addr"]
+
+ # Initialize the CPU data if it's empty, and return immediately if
+ # this is the first tracing event for this CPU.
+ if (cpu_data.get(str(cpu) + 'addr') == None):
+ cpu_data[str(cpu) + 'addr'] = addr
+ return
+
+ # The format for packet is:
+ #
+ # +------------+------------+------------+
+ # sample_prev: | addr | ip | cpu |
+ # +------------+------------+------------+
+ # sample_next: | addr | ip | cpu |
+ # +------------+------------+------------+
+ #
+ # We need to combine the two continuous packets to get the instruction
+ # range for sample_prev::cpu:
+ #
+ # [ sample_prev::addr .. sample_next::ip ]
+ #
+ # For this purpose, sample_prev::addr is stored in the cpu_data
+ # structure and read back as 'start_addr' when the next packet comes;
+ # sample_next::ip is used to calculate 'stop_addr', with an extra 4
+ # added for the sake of objdump so that the final assembly dump can
+ # include the last instruction for sample_next::ip.
+ start_addr = cpu_data[str(cpu) + 'addr']
+ stop_addr = ip + 4
+
+ # Record for previous sample packet
+ cpu_data[str(cpu) + 'addr'] = addr
+
+ # Handle CS_ETM_TRACE_ON packet if start_addr=0 and stop_addr=4
+ if (start_addr == 0 and stop_addr == 4):
+ print("CPU%d: CS_ETM_TRACE_ON packet is inserted" % cpu)
+ return
+
+ if (start_addr < int(dso_start) or start_addr > int(dso_end)):
+ print("Start address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (start_addr, int(dso_start), int(dso_end), dso))
+ return
+
+ if (stop_addr < int(dso_start) or stop_addr > int(dso_end)):
+ print("Stop address 0x%x is out of range [ 0x%x .. 0x%x ] for dso %s" % (stop_addr, int(dso_start), int(dso_end), dso))
+ return
+
+ if (options.objdump_name != None):
+ # There is no need to subtract the virtual memory offset for
+ # disassembly of the kernel dso, so set vm_start to zero in this case.
+ if (dso == "[kernel.kallsyms]"):
+ dso_vm_start = 0
+ else:
+ dso_vm_start = int(dso_start)
+
+ dso_fname = get_dso_file_path(dso, dso_bid)
+ if path.exists(dso_fname):
+ print_disam(dso_fname, dso_vm_start, start_addr, stop_addr)
+ else:
+ print("Failed to find dso %s for address range [ 0x%x .. 0x%x ]" % (dso, start_addr, stop_addr))
+
+ print_srccode(comm, param_dict, sample, symbol, dso)
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index 973bd12b7b40..9b7746b89381 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -104,7 +104,13 @@ def print_ptwrite(raw_buf):
flags = data[0]
payload = data[1]
exact_ip = flags & 1
- print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
+ try:
+ s = payload.to_bytes(8, "little").decode("ascii").rstrip("\x00")
+ if not s.isprintable():
+ s = ""
+ except:
+ s = ""
+ print("IP: %u payload: %#x" % (exact_ip, payload), s, end=' ')
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
index d1ebb5561e5b..6f921db33cf9 100644
--- a/tools/perf/tests/bp_account.c
+++ b/tools/perf/tests/bp_account.c
@@ -151,11 +151,21 @@ static int detect_ioctl(void)
static int detect_share(int wp_cnt, int bp_cnt)
{
struct perf_event_attr attr;
- int i, fd[wp_cnt + bp_cnt], ret;
+ int i, *fd = NULL, ret = -1;
+
+ if (wp_cnt + bp_cnt == 0)
+ return 0;
+
+ fd = malloc(sizeof(int) * (wp_cnt + bp_cnt));
+ if (!fd)
+ return -1;
for (i = 0; i < wp_cnt; i++) {
fd[i] = wp_event((void *)&the_var, &attr);
- TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1);
+ if (fd[i] == -1) {
+ pr_err("failed to create wp\n");
+ goto out;
+ }
}
for (; i < (bp_cnt + wp_cnt); i++) {
@@ -166,9 +176,11 @@ static int detect_share(int wp_cnt, int bp_cnt)
ret = i != (bp_cnt + wp_cnt);
+out:
while (i--)
close(fd[i]);
+ free(fd);
return ret;
}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index d336cda94a11..81cf241cd109 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -137,10 +137,10 @@ static bool has_subtests(const struct test_suite *t)
static const char *skip_reason(const struct test_suite *t, int subtest)
{
- if (t->test_cases && subtest >= 0)
- return t->test_cases[subtest].skip_reason;
+ if (!t->test_cases)
+ return NULL;
- return NULL;
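+ /* A negative subtest selects the suite itself; use the first test case. */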
+ return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
}
static const char *test_description(const struct test_suite *t, int subtest)
@@ -299,7 +299,9 @@ static const char *shell_test__description(char *description, size_t size,
#define for_each_shell_test(entlist, nr, base, ent) \
for (int __i = 0; __i < nr && (ent = entlist[__i]); __i++) \
- if (!is_directory(base, ent) && ent->d_name[0] != '.')
+ if (!is_directory(base, ent) && \
+ is_executable_file(base, ent) && \
+ ent->d_name[0] != '.')
static const char *shell_tests__dir(char *path, size_t size)
{
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
index fdbf17642e45..9d3c64974f77 100644
--- a/tools/perf/tests/evsel-roundtrip-name.c
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -64,7 +64,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
return ret;
}
-static int __perf_evsel__name_array_test(const char *names[], int nr_names,
+static int __perf_evsel__name_array_test(const char *const names[], int nr_names,
int distance)
{
int i, err;
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index d54c5371c6a6..5c0032fe93ae 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -97,6 +97,8 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
ret |= test(ctx, "2.2 > 2.2", 0);
ret |= test(ctx, "2.2 < 1.1", 0);
ret |= test(ctx, "1.1 > 2.2", 0);
+ ret |= test(ctx, "1.1e10 < 1.1e100", 1);
+ ret |= test(ctx, "1.1e2 > 1.1e-2", 1);
if (ret) {
expr__ctx_free(ctx);
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index c3c17600f29c..30bbe144648a 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -31,7 +31,7 @@
*/
static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
- int err = -1;
+ int err = TEST_FAIL;
union perf_event *event;
struct perf_thread_map *threads;
struct perf_cpu_map *cpus;
@@ -83,6 +83,10 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
evsels[i] = evsel__newtp("syscalls", name);
if (IS_ERR(evsels[i])) {
pr_debug("evsel__new(%s)\n", name);
+ if (PTR_ERR(evsels[i]) == -EACCES) {
+ /* Permissions failure, flag the failure as a skip. */
+ err = TEST_SKIP;
+ }
goto out_delete_evlist;
}
@@ -166,4 +170,14 @@ out_free_threads:
return err;
}
-DEFINE_SUITE("Read samples using the mmap interface", basic_mmap);
+static struct test_case tests__basic_mmap[] = {
+ TEST_CASE_REASON("Read samples using the mmap interface",
+ basic_mmap,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__basic_mmap = {
+ .desc = "Read samples using the mmap interface",
+ .test_cases = tests__basic_mmap,
+};
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index 1ab362323d25..90828ae03ef5 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -22,7 +22,7 @@
static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- int err = -1, fd, idx;
+ int err = TEST_FAIL, fd, idx;
struct perf_cpu cpu;
struct perf_cpu_map *cpus;
struct evsel *evsel;
@@ -49,6 +49,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
+ err = TEST_SKIP;
goto out_cpu_map_delete;
}
@@ -56,6 +57,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = TEST_SKIP;
goto out_evsel_delete;
}
@@ -88,7 +90,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
evsel->core.cpus = perf_cpu_map__get(cpus);
- err = 0;
+ err = TEST_OK;
perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
unsigned int expected;
@@ -98,7 +100,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
if (evsel__read_on_cpu(evsel, idx, 0) < 0) {
pr_debug("evsel__read_on_cpu\n");
- err = -1;
+ err = TEST_FAIL;
break;
}
@@ -106,7 +108,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
if (perf_counts(evsel->counts, idx, 0)->val != expected) {
pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
expected, cpu.cpu, perf_counts(evsel->counts, idx, 0)->val);
- err = -1;
+ err = TEST_FAIL;
}
}
@@ -122,4 +124,15 @@ out_thread_map_delete:
return err;
}
-DEFINE_SUITE("Detect openat syscall event on all cpus", openat_syscall_event_on_all_cpus);
+
+static struct test_case tests__openat_syscall_event_on_all_cpus[] = {
+ TEST_CASE_REASON("Detect openat syscall event on all cpus",
+ openat_syscall_event_on_all_cpus,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__openat_syscall_event_on_all_cpus = {
+ .desc = "Detect openat syscall event on all cpus",
+ .test_cases = tests__openat_syscall_event_on_all_cpus,
+};
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
index 7f4c13c4b14d..7e05b8b5cc95 100644
--- a/tools/perf/tests/openat-syscall.c
+++ b/tools/perf/tests/openat-syscall.c
@@ -16,7 +16,7 @@
static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- int err = -1, fd;
+ int err = TEST_FAIL, fd;
struct evsel *evsel;
unsigned int nr_openat_calls = 111, i;
struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
@@ -25,13 +25,14 @@ static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
if (threads == NULL) {
pr_debug("thread_map__new\n");
- return -1;
+ return TEST_FAIL;
}
evsel = evsel__newtp("syscalls", "sys_enter_openat");
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
+ err = TEST_SKIP;
goto out_thread_map_delete;
}
@@ -39,6 +40,7 @@ static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = TEST_SKIP;
goto out_evsel_delete;
}
@@ -58,7 +60,7 @@ static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
goto out_close_fd;
}
- err = 0;
+ err = TEST_OK;
out_close_fd:
perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
@@ -68,4 +70,14 @@ out_thread_map_delete:
return err;
}
-DEFINE_SUITE("Detect openat syscall event", openat_syscall_event);
+static struct test_case tests__openat_syscall_event[] = {
+ TEST_CASE_REASON("Detect openat syscall event",
+ openat_syscall_event,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__openat_syscall_event = {
+ .desc = "Detect openat syscall event",
+ .test_cases = tests__openat_syscall_event,
+};
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index e71efadb24f5..459afdb256a1 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -56,7 +56,7 @@ static int test__checkevent_tracepoint(struct evlist *evlist)
TEST_ASSERT_VAL("wrong sample_type",
PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_tracepoint_multi(struct evlist *evlist)
@@ -74,7 +74,7 @@ static int test__checkevent_tracepoint_multi(struct evlist *evlist)
TEST_ASSERT_VAL("wrong sample_period",
1 == evsel->core.attr.sample_period);
}
- return 0;
+ return TEST_OK;
}
static int test__checkevent_raw(struct evlist *evlist)
@@ -84,7 +84,7 @@ static int test__checkevent_raw(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_numeric(struct evlist *evlist)
@@ -94,7 +94,7 @@ static int test__checkevent_numeric(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_symbolic_name(struct evlist *evlist)
@@ -105,7 +105,7 @@ static int test__checkevent_symbolic_name(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_symbolic_name_config(struct evlist *evlist)
@@ -126,7 +126,7 @@ static int test__checkevent_symbolic_name_config(struct evlist *evlist)
0 == evsel->core.attr.config1);
TEST_ASSERT_VAL("wrong config2",
1 == evsel->core.attr.config2);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_symbolic_alias(struct evlist *evlist)
@@ -137,7 +137,7 @@ static int test__checkevent_symbolic_alias(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_genhw(struct evlist *evlist)
@@ -147,7 +147,7 @@ static int test__checkevent_genhw(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_breakpoint(struct evlist *evlist)
@@ -161,7 +161,7 @@ static int test__checkevent_breakpoint(struct evlist *evlist)
evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
evsel->core.attr.bp_len);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_breakpoint_x(struct evlist *evlist)
@@ -174,7 +174,7 @@ static int test__checkevent_breakpoint_x(struct evlist *evlist)
TEST_ASSERT_VAL("wrong bp_type",
HW_BREAKPOINT_X == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->core.attr.bp_len);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_breakpoint_r(struct evlist *evlist)
@@ -189,7 +189,7 @@ static int test__checkevent_breakpoint_r(struct evlist *evlist)
HW_BREAKPOINT_R == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len",
HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_breakpoint_w(struct evlist *evlist)
@@ -204,7 +204,7 @@ static int test__checkevent_breakpoint_w(struct evlist *evlist)
HW_BREAKPOINT_W == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len",
HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_breakpoint_rw(struct evlist *evlist)
@@ -219,7 +219,7 @@ static int test__checkevent_breakpoint_rw(struct evlist *evlist)
(HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->core.attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len",
HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
@@ -450,7 +450,7 @@ static int test__checkevent_pmu(struct evlist *evlist)
*/
TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_list(struct evlist *evlist)
@@ -489,7 +489,7 @@ static int test__checkevent_list(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_pmu_name(struct evlist *evlist)
@@ -510,7 +510,7 @@ static int test__checkevent_pmu_name(struct evlist *evlist)
TEST_ASSERT_VAL("wrong name",
!strcmp(evsel__name(evsel), "cpu/config=2/u"));
- return 0;
+ return TEST_OK;
}
static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
@@ -541,7 +541,7 @@ static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type));
- return 0;
+ return TEST_OK;
}
static int test__checkevent_pmu_events(struct evlist *evlist)
@@ -559,7 +559,7 @@ static int test__checkevent_pmu_events(struct evlist *evlist)
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
- return 0;
+ return TEST_OK;
}
@@ -591,7 +591,7 @@ static int test__checkevent_pmu_events_mix(struct evlist *evlist)
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.pinned);
- return 0;
+ return TEST_OK;
}
static int test__checkterms_simple(struct list_head *terms)
@@ -662,7 +662,7 @@ static int test__checkterms_simple(struct list_head *terms)
term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
TEST_ASSERT_VAL("wrong val", term->val.num == 0xead);
TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
- return 0;
+ return TEST_OK;
}
static int test__group1(struct evlist *evlist)
@@ -704,7 +704,7 @@ static int test__group1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
- return 0;
+ return TEST_OK;
}
static int test__group2(struct evlist *evlist)
@@ -759,7 +759,7 @@ static int test__group2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
- return 0;
+ return TEST_OK;
}
static int test__group3(struct evlist *evlist __maybe_unused)
@@ -851,7 +851,7 @@ static int test__group3(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
- return 0;
+ return TEST_OK;
}
static int test__group4(struct evlist *evlist __maybe_unused)
@@ -895,7 +895,7 @@ static int test__group4(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
- return 0;
+ return TEST_OK;
}
static int test__group5(struct evlist *evlist __maybe_unused)
@@ -981,7 +981,7 @@ static int test__group5(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
- return 0;
+ return TEST_OK;
}
static int test__group_gh1(struct evlist *evlist)
@@ -1021,7 +1021,7 @@ static int test__group_gh1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- return 0;
+ return TEST_OK;
}
static int test__group_gh2(struct evlist *evlist)
@@ -1061,7 +1061,7 @@ static int test__group_gh2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- return 0;
+ return TEST_OK;
}
static int test__group_gh3(struct evlist *evlist)
@@ -1101,7 +1101,7 @@ static int test__group_gh3(struct evlist *evlist)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- return 0;
+ return TEST_OK;
}
static int test__group_gh4(struct evlist *evlist)
@@ -1141,7 +1141,7 @@ static int test__group_gh4(struct evlist *evlist)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
- return 0;
+ return TEST_OK;
}
static int test__leader_sample1(struct evlist *evlist)
@@ -1194,7 +1194,7 @@ static int test__leader_sample1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
- return 0;
+ return TEST_OK;
}
static int test__leader_sample2(struct evlist *evlist __maybe_unused)
@@ -1233,7 +1233,7 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_pinned_modifier(struct evlist *evlist)
@@ -1277,7 +1277,7 @@ static int test__pinned_group(struct evlist *evlist)
PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config);
TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_exclusive_modifier(struct evlist *evlist)
@@ -1321,7 +1321,7 @@ static int test__exclusive_group(struct evlist *evlist)
PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config);
TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_breakpoint_len(struct evlist *evlist)
{
@@ -1335,7 +1335,7 @@ static int test__checkevent_breakpoint_len(struct evlist *evlist)
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
evsel->core.attr.bp_len);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
@@ -1350,7 +1350,7 @@ static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
evsel->core.attr.bp_len);
- return 0;
+ return TEST_OK;
}
static int
@@ -1374,7 +1374,7 @@ static int test__checkevent_precise_max_modifier(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config",
PERF_COUNT_SW_TASK_CLOCK == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_config_symbol(struct evlist *evlist)
@@ -1382,7 +1382,7 @@ static int test__checkevent_config_symbol(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "insn") == 0);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_config_raw(struct evlist *evlist)
@@ -1390,7 +1390,7 @@ static int test__checkevent_config_raw(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "rawpmu") == 0);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_config_num(struct evlist *evlist)
@@ -1398,7 +1398,7 @@ static int test__checkevent_config_num(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "numpmu") == 0);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_config_cache(struct evlist *evlist)
@@ -1406,7 +1406,7 @@ static int test__checkevent_config_cache(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "cachepmu") == 0);
- return 0;
+ return TEST_OK;
}
static bool test__intel_pt_valid(void)
@@ -1419,7 +1419,7 @@ static int test__intel_pt(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_complex_name(struct evlist *evlist)
@@ -1427,7 +1427,7 @@ static int test__checkevent_complex_name(struct evlist *evlist)
struct evsel *evsel = evlist__first(evlist);
TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0);
- return 0;
+ return TEST_OK;
}
static int test__checkevent_raw_pmu(struct evlist *evlist)
@@ -1437,7 +1437,7 @@ static int test__checkevent_raw_pmu(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__sym_event_slash(struct evlist *evlist)
@@ -1447,7 +1447,7 @@ static int test__sym_event_slash(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- return 0;
+ return TEST_OK;
}
static int test__sym_event_dc(struct evlist *evlist)
@@ -1457,7 +1457,7 @@ static int test__sym_event_dc(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
- return 0;
+ return TEST_OK;
}
static int count_tracepoints(void)
@@ -1521,7 +1521,7 @@ static int test__hybrid_hw_event_with_pmu(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__hybrid_hw_group_event(struct evlist *evlist)
@@ -1538,7 +1538,7 @@ static int test__hybrid_hw_group_event(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- return 0;
+ return TEST_OK;
}
static int test__hybrid_sw_hw_group_event(struct evlist *evlist)
@@ -1554,7 +1554,7 @@ static int test__hybrid_sw_hw_group_event(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- return 0;
+ return TEST_OK;
}
static int test__hybrid_hw_sw_group_event(struct evlist *evlist)
@@ -1570,7 +1570,7 @@ static int test__hybrid_hw_sw_group_event(struct evlist *evlist)
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
- return 0;
+ return TEST_OK;
}
static int test__hybrid_group_modifier1(struct evlist *evlist)
@@ -1591,7 +1591,7 @@ static int test__hybrid_group_modifier1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
- return 0;
+ return TEST_OK;
}
static int test__hybrid_raw1(struct evlist *evlist)
@@ -1602,7 +1602,7 @@ static int test__hybrid_raw1(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
@@ -1612,7 +1612,7 @@ static int test__hybrid_raw1(struct evlist *evlist)
/* The type of the second event is a random value */
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__hybrid_raw2(struct evlist *evlist)
@@ -1622,7 +1622,7 @@ static int test__hybrid_raw2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
- return 0;
+ return TEST_OK;
}
static int test__hybrid_cache_event(struct evlist *evlist)
@@ -1632,434 +1632,435 @@ static int test__hybrid_cache_event(struct evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff));
- return 0;
+ return TEST_OK;
}
struct evlist_test {
const char *name;
- __u32 type;
- const int id;
bool (*valid)(void);
int (*check)(struct evlist *evlist);
};
-static struct evlist_test test__events[] = {
+static const struct evlist_test test__events[] = {
{
.name = "syscalls:sys_enter_openat",
.check = test__checkevent_tracepoint,
- .id = 0,
+ /* 0 */
},
{
.name = "syscalls:*",
.check = test__checkevent_tracepoint_multi,
- .id = 1,
+ /* 1 */
},
{
.name = "r1a",
.check = test__checkevent_raw,
- .id = 2,
+ /* 2 */
},
{
.name = "1:1",
.check = test__checkevent_numeric,
- .id = 3,
+ /* 3 */
},
{
.name = "instructions",
.check = test__checkevent_symbolic_name,
- .id = 4,
+ /* 4 */
},
{
.name = "cycles/period=100000,config2/",
.check = test__checkevent_symbolic_name_config,
- .id = 5,
+ /* 5 */
},
{
.name = "faults",
.check = test__checkevent_symbolic_alias,
- .id = 6,
+ /* 6 */
},
{
.name = "L1-dcache-load-miss",
.check = test__checkevent_genhw,
- .id = 7,
+ /* 7 */
},
{
.name = "mem:0",
.check = test__checkevent_breakpoint,
- .id = 8,
+ /* 8 */
},
{
.name = "mem:0:x",
.check = test__checkevent_breakpoint_x,
- .id = 9,
+ /* 9 */
},
{
.name = "mem:0:r",
.check = test__checkevent_breakpoint_r,
- .id = 10,
+ /* 0 */
},
{
.name = "mem:0:w",
.check = test__checkevent_breakpoint_w,
- .id = 11,
+ /* 1 */
},
{
.name = "syscalls:sys_enter_openat:k",
.check = test__checkevent_tracepoint_modifier,
- .id = 12,
+ /* 2 */
},
{
.name = "syscalls:*:u",
.check = test__checkevent_tracepoint_multi_modifier,
- .id = 13,
+ /* 3 */
},
{
.name = "r1a:kp",
.check = test__checkevent_raw_modifier,
- .id = 14,
+ /* 4 */
},
{
.name = "1:1:hp",
.check = test__checkevent_numeric_modifier,
- .id = 15,
+ /* 5 */
},
{
.name = "instructions:h",
.check = test__checkevent_symbolic_name_modifier,
- .id = 16,
+ /* 6 */
},
{
.name = "faults:u",
.check = test__checkevent_symbolic_alias_modifier,
- .id = 17,
+ /* 7 */
},
{
.name = "L1-dcache-load-miss:kp",
.check = test__checkevent_genhw_modifier,
- .id = 18,
+ /* 8 */
},
{
.name = "mem:0:u",
.check = test__checkevent_breakpoint_modifier,
- .id = 19,
+ /* 9 */
},
{
.name = "mem:0:x:k",
.check = test__checkevent_breakpoint_x_modifier,
- .id = 20,
+ /* 0 */
},
{
.name = "mem:0:r:hp",
.check = test__checkevent_breakpoint_r_modifier,
- .id = 21,
+ /* 1 */
},
{
.name = "mem:0:w:up",
.check = test__checkevent_breakpoint_w_modifier,
- .id = 22,
+ /* 2 */
},
{
.name = "r1,syscalls:sys_enter_openat:k,1:1:hp",
.check = test__checkevent_list,
- .id = 23,
+ /* 3 */
},
{
.name = "instructions:G",
.check = test__checkevent_exclude_host_modifier,
- .id = 24,
+ /* 4 */
},
{
.name = "instructions:H",
.check = test__checkevent_exclude_guest_modifier,
- .id = 25,
+ /* 5 */
},
{
.name = "mem:0:rw",
.check = test__checkevent_breakpoint_rw,
- .id = 26,
+ /* 6 */
},
{
.name = "mem:0:rw:kp",
.check = test__checkevent_breakpoint_rw_modifier,
- .id = 27,
+ /* 7 */
},
{
.name = "{instructions:k,cycles:upp}",
.check = test__group1,
- .id = 28,
+ /* 8 */
},
{
.name = "{faults:k,cache-references}:u,cycles:k",
.check = test__group2,
- .id = 29,
+ /* 9 */
},
{
.name = "group1{syscalls:sys_enter_openat:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
.check = test__group3,
- .id = 30,
+ /* 0 */
},
{
.name = "{cycles:u,instructions:kp}:p",
.check = test__group4,
- .id = 31,
+ /* 1 */
},
{
.name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles",
.check = test__group5,
- .id = 32,
+ /* 2 */
},
{
.name = "*:*",
.check = test__all_tracepoints,
- .id = 33,
+ /* 3 */
},
{
.name = "{cycles,cache-misses:G}:H",
.check = test__group_gh1,
- .id = 34,
+ /* 4 */
},
{
.name = "{cycles,cache-misses:H}:G",
.check = test__group_gh2,
- .id = 35,
+ /* 5 */
},
{
.name = "{cycles:G,cache-misses:H}:u",
.check = test__group_gh3,
- .id = 36,
+ /* 6 */
},
{
.name = "{cycles:G,cache-misses:H}:uG",
.check = test__group_gh4,
- .id = 37,
+ /* 7 */
},
{
.name = "{cycles,cache-misses,branch-misses}:S",
.check = test__leader_sample1,
- .id = 38,
+ /* 8 */
},
{
.name = "{instructions,branch-misses}:Su",
.check = test__leader_sample2,
- .id = 39,
+ /* 9 */
},
{
.name = "instructions:uDp",
.check = test__checkevent_pinned_modifier,
- .id = 40,
+ /* 0 */
},
{
.name = "{cycles,cache-misses,branch-misses}:D",
.check = test__pinned_group,
- .id = 41,
+ /* 1 */
},
{
.name = "mem:0/1",
.check = test__checkevent_breakpoint_len,
- .id = 42,
+ /* 2 */
},
{
.name = "mem:0/2:w",
.check = test__checkevent_breakpoint_len_w,
- .id = 43,
+ /* 3 */
},
{
.name = "mem:0/4:rw:u",
.check = test__checkevent_breakpoint_len_rw_modifier,
- .id = 44
+ /* 4 */
},
#if defined(__s390x__)
{
.name = "kvm-s390:kvm_s390_create_vm",
.check = test__checkevent_tracepoint,
.valid = kvm_s390_create_vm_valid,
- .id = 100,
+ /* 0 */
},
#endif
{
.name = "instructions:I",
.check = test__checkevent_exclude_idle_modifier,
- .id = 45,
+ /* 5 */
},
{
.name = "instructions:kIG",
.check = test__checkevent_exclude_idle_modifier_1,
- .id = 46,
+ /* 6 */
},
{
.name = "task-clock:P,cycles",
.check = test__checkevent_precise_max_modifier,
- .id = 47,
+ /* 7 */
},
{
.name = "instructions/name=insn/",
.check = test__checkevent_config_symbol,
- .id = 48,
+ /* 8 */
},
{
.name = "r1234/name=rawpmu/",
.check = test__checkevent_config_raw,
- .id = 49,
+ /* 9 */
},
{
.name = "4:0x6530160/name=numpmu/",
.check = test__checkevent_config_num,
- .id = 50,
+ /* 0 */
},
{
.name = "L1-dcache-misses/name=cachepmu/",
.check = test__checkevent_config_cache,
- .id = 51,
+ /* 1 */
},
{
.name = "intel_pt//u",
.valid = test__intel_pt_valid,
.check = test__intel_pt,
- .id = 52,
+ /* 2 */
},
{
.name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
.check = test__checkevent_complex_name,
- .id = 53
+ /* 3 */
},
{
.name = "cycles//u",
.check = test__sym_event_slash,
- .id = 54,
+ /* 4 */
},
{
.name = "cycles:k",
.check = test__sym_event_dc,
- .id = 55,
+ /* 5 */
},
{
.name = "instructions:uep",
.check = test__checkevent_exclusive_modifier,
- .id = 56,
+ /* 6 */
},
{
.name = "{cycles,cache-misses,branch-misses}:e",
.check = test__exclusive_group,
- .id = 57,
+ /* 7 */
},
};
-static struct evlist_test test__events_pmu[] = {
+static const struct evlist_test test__events_pmu[] = {
{
.name = "cpu/config=10,config1,config2=3,period=1000/u",
.check = test__checkevent_pmu,
- .id = 0,
+ /* 0 */
},
{
.name = "cpu/config=1,name=krava/u,cpu/config=2/u",
.check = test__checkevent_pmu_name,
- .id = 1,
+ /* 1 */
},
{
.name = "cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/",
.check = test__checkevent_pmu_partial_time_callgraph,
- .id = 2,
+ /* 2 */
},
{
.name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
.check = test__checkevent_complex_name,
- .id = 3,
+ /* 3 */
},
{
.name = "software/r1a/",
.check = test__checkevent_raw_pmu,
- .id = 4,
+ /* 4 */
},
{
.name = "software/r0x1a/",
.check = test__checkevent_raw_pmu,
- .id = 4,
+ /* 5 */
},
};
struct terms_test {
const char *str;
- __u32 type;
int (*check)(struct list_head *terms);
};
-static struct terms_test test__terms[] = {
+static const struct terms_test test__terms[] = {
[0] = {
.str = "config=10,config1,config2=3,umask=1,read,r0xead",
.check = test__checkterms_simple,
},
};
-static struct evlist_test test__hybrid_events[] = {
+static const struct evlist_test test__hybrid_events[] = {
{
.name = "cpu_core/cpu-cycles/",
.check = test__hybrid_hw_event_with_pmu,
- .id = 0,
+ /* 0 */
},
{
.name = "{cpu_core/cpu-cycles/,cpu_core/instructions/}",
.check = test__hybrid_hw_group_event,
- .id = 1,
+ /* 1 */
},
{
.name = "{cpu-clock,cpu_core/cpu-cycles/}",
.check = test__hybrid_sw_hw_group_event,
- .id = 2,
+ /* 2 */
},
{
.name = "{cpu_core/cpu-cycles/,cpu-clock}",
.check = test__hybrid_hw_sw_group_event,
- .id = 3,
+ /* 3 */
},
{
.name = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}",
.check = test__hybrid_group_modifier1,
- .id = 4,
+ /* 4 */
},
{
.name = "r1a",
.check = test__hybrid_raw1,
- .id = 5,
+ /* 5 */
},
{
.name = "cpu_core/r1a/",
.check = test__hybrid_raw2,
- .id = 6,
+ /* 6 */
},
{
.name = "cpu_core/config=10,config1,config2=3,period=1000/u",
.check = test__checkevent_pmu,
- .id = 7,
+ /* 7 */
},
{
.name = "cpu_core/LLC-loads/",
.check = test__hybrid_cache_event,
- .id = 8,
+ /* 8 */
},
};
-static int test_event(struct evlist_test *e)
+static int test_event(const struct evlist_test *e)
{
struct parse_events_error err;
struct evlist *evlist;
int ret;
if (e->valid && !e->valid()) {
- pr_debug("... SKIP");
- return 0;
+ pr_debug("... SKIP\n");
+ return TEST_OK;
}
evlist = evlist__new();
- if (evlist == NULL)
- return -ENOMEM;
-
+ if (evlist == NULL) {
+ pr_err("Failed allocation");
+ return TEST_FAIL;
+ }
parse_events_error__init(&err);
ret = parse_events(evlist, e->name, &err);
if (ret) {
pr_debug("failed to parse event '%s', err %d, str '%s'\n",
e->name, ret, err.str);
parse_events_error__print(&err, e->name);
+ ret = TEST_FAIL;
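+ /* Being unable to access trace events is a permissions issue; skip. */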
+ if (strstr(err.str, "can't access trace events"))
+ ret = TEST_SKIP;
} else {
ret = e->check(evlist);
}
@@ -2094,25 +2095,40 @@ static int test_event_fake_pmu(const char *str)
return ret;
}
-static int test_events(struct evlist_test *events, unsigned cnt)
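+/*
+ * Merge two test results: TEST_FAIL takes precedence over TEST_SKIP,
+ * which takes precedence over TEST_OK.
+ */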
+static int combine_test_results(int existing, int latest)
+{
+ if (existing == TEST_FAIL)
+ return TEST_FAIL;
+ if (existing == TEST_SKIP)
+ return latest == TEST_OK ? TEST_SKIP : latest;
+ return latest;
+}
+
+static int test_events(const struct evlist_test *events, int cnt)
{
- int ret1, ret2 = 0;
- unsigned i;
+ int ret = TEST_OK;
- for (i = 0; i < cnt; i++) {
- struct evlist_test *e = &events[i];
+ for (int i = 0; i < cnt; i++) {
+ const struct evlist_test *e = &events[i];
+ int test_ret;
- pr_debug("running test %d '%s'", e->id, e->name);
- ret1 = test_event(e);
- if (ret1)
- ret2 = ret1;
- pr_debug("\n");
+ pr_debug("running test %d '%s'\n", i, e->name);
+ test_ret = test_event(e);
+ if (test_ret != TEST_OK) {
+ pr_debug("Event test failure: test %d '%s'", i, e->name);
+ ret = combine_test_results(ret, test_ret);
+ }
}
- return ret2;
+ return ret;
}
-static int test_term(struct terms_test *t)
+static int test__events2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ return test_events(test__events, ARRAY_SIZE(test__events));
+}
+
+static int test_term(const struct terms_test *t)
{
struct list_head terms;
int ret;
@@ -2139,13 +2155,12 @@ static int test_term(struct terms_test *t)
return ret;
}
-static int test_terms(struct terms_test *terms, unsigned cnt)
+static int test_terms(const struct terms_test *terms, int cnt)
{
int ret = 0;
- unsigned i;
- for (i = 0; i < cnt; i++) {
- struct terms_test *t = &terms[i];
+ for (int i = 0; i < cnt; i++) {
+ const struct terms_test *t = &terms[i];
pr_debug("running test %d '%s'\n", i, t->str);
ret = test_term(t);
@@ -2156,6 +2171,11 @@ static int test_terms(struct terms_test *terms, unsigned cnt)
return ret;
}
+static int test__terms2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ return test_terms(test__terms, ARRAY_SIZE(test__terms));
+}
+
static int test_pmu(void)
{
struct stat st;
@@ -2171,7 +2191,7 @@ static int test_pmu(void)
return !ret;
}
-static int test_pmu_events(void)
+static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct stat st;
char path[PATH_MAX];
@@ -2179,24 +2199,29 @@ static int test_pmu_events(void)
DIR *dir;
int ret;
+ if (!test_pmu())
+ return TEST_SKIP;
+
snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/events/",
sysfs__mountpoint());
ret = stat(path, &st);
if (ret) {
- pr_debug("omitting PMU cpu events tests\n");
- return 0;
+ pr_debug("omitting PMU cpu events tests: %s\n", path);
+ return TEST_OK;
}
dir = opendir(path);
if (!dir) {
- pr_debug("can't open pmu event dir");
- return -1;
+ pr_debug("can't open pmu event dir: %s\n", path);
+ return TEST_FAIL;
}
- while (!ret && (ent = readdir(dir))) {
- struct evlist_test e = { .id = 0, };
+ ret = TEST_OK;
+ while ((ent = readdir(dir))) {
+ struct evlist_test e = { .name = NULL, };
char name[2 * NAME_MAX + 1 + 12 + 3];
+ int test_ret;
/* Names containing . are special and cannot be used directly */
if (strchr(ent->d_name, '.'))
@@ -2207,19 +2232,33 @@ static int test_pmu_events(void)
e.name = name;
e.check = test__checkevent_pmu_events;
- ret = test_event(&e);
- if (ret)
- break;
+ test_ret = test_event(&e);
+ if (test_ret != TEST_OK) {
+ pr_debug("Test PMU event failed for '%s'", name);
+ ret = combine_test_results(ret, test_ret);
+ }
snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events_mix;
- ret = test_event(&e);
+ test_ret = test_event(&e);
+ if (test_ret != TEST_OK) {
+ pr_debug("Test PMU event failed for '%s'", name);
+ ret = combine_test_results(ret, test_ret);
+ }
}
closedir(dir);
return ret;
}
+static int test__pmu_events2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ if (!test_pmu())
+ return TEST_SKIP;
+
+ return test_events(test__events_pmu, ARRAY_SIZE(test__events_pmu));
+}
+
static bool test_alias(char **event, char **alias)
{
char path[PATH_MAX];
@@ -2278,6 +2317,14 @@ static bool test_alias(char **event, char **alias)
return false;
}
+static int test__hybrid(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ if (!perf_pmu__has_hybrid())
+ return TEST_SKIP;
+
+ return test_events(test__hybrid_events, ARRAY_SIZE(test__hybrid_events));
+}
+
static int test__checkevent_pmu_events_alias(struct evlist *evlist)
{
struct evsel *evsel1 = evlist__first(evlist);
@@ -2285,12 +2332,12 @@ static int test__checkevent_pmu_events_alias(struct evlist *evlist)
TEST_ASSERT_VAL("wrong type", evsel1->core.attr.type == evsel2->core.attr.type);
TEST_ASSERT_VAL("wrong config", evsel1->core.attr.config == evsel2->core.attr.config);
- return 0;
+ return TEST_OK;
}
-static int test_pmu_events_alias(char *event, char *alias)
+static int test__pmu_events_alias(char *event, char *alias)
{
- struct evlist_test e = { .id = 0, };
+ struct evlist_test e = { .name = NULL, };
char name[2 * NAME_MAX + 20];
snprintf(name, sizeof(name), "%s/event=1/,%s/event=1/",
@@ -2301,72 +2348,63 @@ static int test_pmu_events_alias(char *event, char *alias)
return test_event(&e);
}
-static int test_pmu_events_alias2(void)
+static int test__alias(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
- static const char events[][30] = {
- "event-hyphen",
- "event-two-hyph",
- };
- unsigned long i;
- int ret = 0;
+ char *event, *alias;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(events); i++) {
- ret = test_event_fake_pmu(&events[i][0]);
- if (ret) {
- pr_err("check_parse_fake %s failed\n", &events[i][0]);
- break;
- }
- }
+ if (!test_alias(&event, &alias))
+ return TEST_SKIP;
+ ret = test__pmu_events_alias(event, alias);
+
+ free(event);
+ free(alias);
return ret;
}
-static int test__parse_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+static int test__pmu_events_alias2(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
{
- int ret1, ret2 = 0;
- char *event, *alias;
-
-#define TEST_EVENTS(tests) \
-do { \
- ret1 = test_events(tests, ARRAY_SIZE(tests)); \
- if (!ret2) \
- ret2 = ret1; \
-} while (0)
-
- if (perf_pmu__has_hybrid()) {
- TEST_EVENTS(test__hybrid_events);
- return ret2;
- }
-
- TEST_EVENTS(test__events);
-
- if (test_pmu())
- TEST_EVENTS(test__events_pmu);
-
- if (test_pmu()) {
- int ret = test_pmu_events();
- if (ret)
- return ret;
- }
+ static const char events[][30] = {
+ "event-hyphen",
+ "event-two-hyph",
+ };
+ int ret = TEST_OK;
- if (test_alias(&event, &alias)) {
- int ret = test_pmu_events_alias(event, alias);
+ for (unsigned int i = 0; i < ARRAY_SIZE(events); i++) {
+ int test_ret = test_event_fake_pmu(&events[i][0]);
- free(event);
- free(alias);
- if (ret)
- return ret;
+ if (test_ret != TEST_OK) {
+ pr_debug("check_parse_fake %s failed\n", &events[i][0]);
+ ret = combine_test_results(ret, test_ret);
+ }
}
- ret1 = test_pmu_events_alias2();
- if (!ret2)
- ret2 = ret1;
-
- ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms));
- if (!ret2)
- ret2 = ret1;
-
- return ret2;
+ return ret;
}
-DEFINE_SUITE("Parse event definition strings", parse_events);
+static struct test_case tests__parse_events[] = {
+ TEST_CASE_REASON("Test event parsing",
+ events2,
+ "permissions"),
+ TEST_CASE_REASON("Test parsing of \"hybrid\" CPU events",
+ hybrid,
+ "not hybrid"),
+ TEST_CASE_REASON("Parsing of all PMU events from sysfs",
+ pmu_events,
+ "permissions"),
+ TEST_CASE_REASON("Parsing of given PMU events from sysfs",
+ pmu_events2,
+ "permissions"),
+ TEST_CASE_REASON("Parsing of aliased events from sysfs", alias,
+ "no aliases in sysfs"),
+ TEST_CASE("Parsing of aliased events", pmu_events_alias2),
+ TEST_CASE("Parsing of terms (event modifiers)", terms2),
+ { .name = NULL, }
+};
+
+struct test_suite suite__parse_events = {
+ .desc = "Parse event definition strings",
+ .test_cases = tests__parse_events,
+};
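The suite conversion above replaces a single pass/fail result with per-test-case results, where combine_test_results() gives failure precedence over skip, and skip precedence over success. A minimal standalone sketch of that precedence, with the result codes defined locally for illustration (perf's actual TEST_OK/TEST_SKIP/TEST_FAIL come from tools/perf/tests/tests.h):

    #include <assert.h>

    enum { TEST_OK = 0, TEST_FAIL = -1, TEST_SKIP = -2 };

    static int combine_test_results(int existing, int latest)
    {
            if (existing == TEST_FAIL)
                    return TEST_FAIL;
            if (existing == TEST_SKIP)
                    return latest == TEST_OK ? TEST_SKIP : latest;
            return latest;
    }

    int main(void)
    {
            /* FAIL is sticky: once failed, stay failed */
            assert(combine_test_results(TEST_FAIL, TEST_OK) == TEST_FAIL);
            /* SKIP survives later successes but not later failures */
            assert(combine_test_results(TEST_SKIP, TEST_OK) == TEST_SKIP);
            assert(combine_test_results(TEST_SKIP, TEST_FAIL) == TEST_FAIL);
            /* OK simply takes the latest result */
            assert(combine_test_results(TEST_OK, TEST_SKIP) == TEST_SKIP);
            return 0;
    }
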
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 6354465067b8..6a001fcfed68 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -330,7 +330,21 @@ found_exit:
out_delete_evlist:
evlist__delete(evlist);
out:
- return (err < 0 || errs > 0) ? -1 : 0;
+ if (err == -EACCES)
+ return TEST_SKIP;
+ if (err < 0)
+ return TEST_FAIL;
+ return TEST_OK;
}
-DEFINE_SUITE("PERF_RECORD_* events & perf_sample fields", PERF_RECORD);
+static struct test_case tests__PERF_RECORD[] = {
+ TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
+ PERF_RECORD,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__PERF_RECORD = {
+ .desc = "PERF_RECORD_* events & perf_sample fields",
+ .test_cases = tests__PERF_RECORD,
+};
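The PERF_RECORD suite now distinguishes an unprivileged environment from a real failure: -EACCES maps to TEST_SKIP, so restricted runners report "permissions" instead of failing. A small sketch of that mapping, again with the result codes defined locally for illustration:

    #include <errno.h>
    #include <stdio.h>

    enum { TEST_OK = 0, TEST_FAIL = -1, TEST_SKIP = -2 };

    /* Translate a perf-style negative errno into a test verdict. */
    static int errno_to_verdict(int err)
    {
            if (err == -EACCES)
                    return TEST_SKIP;       /* not allowed, not broken */
            if (err < 0)
                    return TEST_FAIL;
            return TEST_OK;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   errno_to_verdict(-EACCES),   /* -2: skip */
                   errno_to_verdict(-EINVAL),   /* -1: fail */
                   errno_to_verdict(0));        /*  0: ok   */
            return 0;
    }
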
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index 4ad0dfbc8b21..7c7d20fc503a 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -20,8 +20,6 @@
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
-#include "pmu.h"
-#include "pmu-hybrid.h"
/*
* Except x86_64/i386 and Arm64, other archs don't support TSC in perf. Just
@@ -106,28 +104,21 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
evlist__config(evlist, &opts, NULL);
- evsel = evlist__first(evlist);
-
- evsel->core.attr.comm = 1;
- evsel->core.attr.disabled = 1;
- evsel->core.attr.enable_on_exec = 0;
-
- /*
- * For hybrid "cycles:u", it creates two events.
- * Init the second evsel here.
- */
- if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
- evsel = evsel__next(evsel);
+ /* For hybrid "cycles:u", it creates two events */
+ evlist__for_each_entry(evlist, evsel) {
evsel->core.attr.comm = 1;
evsel->core.attr.disabled = 1;
evsel->core.attr.enable_on_exec = 0;
}
- if (evlist__open(evlist) == -ENOENT) {
- err = TEST_SKIP;
+ ret = evlist__open(evlist);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ err = TEST_SKIP;
+ else
+ pr_debug("evlist__open() failed\n");
goto out_err;
}
- CHECK__(evlist__open(evlist));
CHECK__(evlist__mmap(evlist, UINT_MAX));
@@ -167,10 +158,12 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
goto next_event;
if (strcmp(event->comm.comm, comm1) == 0) {
+ CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
CHECK__(evsel__parse_sample(evsel, event, &sample));
comm1_time = sample.time;
}
if (strcmp(event->comm.comm, comm2) == 0) {
+ CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
CHECK__(evsel__parse_sample(evsel, event, &sample));
comm2_time = sample.time;
}
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index 299a215eb54c..f13368569d8b 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -63,33 +63,33 @@ static const struct perf_pmu_test_event bp_l2_btb_correct = {
static const struct perf_pmu_test_event segment_reg_loads_any = {
.event = {
.name = "segment_reg_loads.any",
- .event = "umask=0x80,period=200000,event=0x6",
+ .event = "event=0x6,period=200000,umask=0x80",
.desc = "Number of segment register loads",
.topic = "other",
},
- .alias_str = "umask=0x80,period=0x30d40,event=0x6",
+ .alias_str = "event=0x6,period=0x30d40,umask=0x80",
.alias_long_desc = "Number of segment register loads",
};
static const struct perf_pmu_test_event dispatch_blocked_any = {
.event = {
.name = "dispatch_blocked.any",
- .event = "umask=0x20,period=200000,event=0x9",
+ .event = "event=0x9,period=200000,umask=0x20",
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
- .alias_str = "umask=0x20,period=0x30d40,event=0x9",
+ .alias_str = "event=0x9,period=0x30d40,umask=0x20",
.alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
};
static const struct perf_pmu_test_event eist_trans = {
.event = {
.name = "eist_trans",
- .event = "umask=0x0,period=200000,event=0x3a",
+ .event = "event=0x3a,period=200000,umask=0x0",
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
- .alias_str = "umask=0,period=0x30d40,event=0x3a",
+ .alias_str = "event=0x3a,period=0x30d40,umask=0",
.alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
};
@@ -132,13 +132,13 @@ static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = {
static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
.event = {
.name = "unc_cbo_xsnp_response.miss_eviction",
- .event = "umask=0x81,event=0x22",
- .desc = "Unit: uncore_cbox A cross-core snoop resulted from L3 Eviction which misses in some processor core",
+ .event = "event=0x22,umask=0x81",
+ .desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
.topic = "uncore",
.long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.pmu = "uncore_cbox",
},
- .alias_str = "umask=0x81,event=0x22",
+ .alias_str = "event=0x22,umask=0x81",
.alias_long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
.matching_pmu = "uncore_cbox_0",
};
@@ -146,13 +146,13 @@ static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
static const struct perf_pmu_test_event uncore_hyphen = {
.event = {
.name = "event-hyphen",
- .event = "umask=0x00,event=0xe0",
- .desc = "Unit: uncore_cbox UNC_CBO_HYPHEN",
+ .event = "event=0xe0,umask=0x00",
+ .desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
.topic = "uncore",
.long_desc = "UNC_CBO_HYPHEN",
.pmu = "uncore_cbox",
},
- .alias_str = "umask=0,event=0xe0",
+ .alias_str = "event=0xe0,umask=0",
.alias_long_desc = "UNC_CBO_HYPHEN",
.matching_pmu = "uncore_cbox_0",
};
@@ -160,13 +160,13 @@ static const struct perf_pmu_test_event uncore_hyphen = {
static const struct perf_pmu_test_event uncore_two_hyph = {
.event = {
.name = "event-two-hyph",
- .event = "umask=0x00,event=0xc0",
- .desc = "Unit: uncore_cbox UNC_CBO_TWO_HYPH",
+ .event = "event=0xc0,umask=0x00",
+ .desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
.topic = "uncore",
.long_desc = "UNC_CBO_TWO_HYPH",
.pmu = "uncore_cbox",
},
- .alias_str = "umask=0,event=0xc0",
+ .alias_str = "event=0xc0,umask=0",
.alias_long_desc = "UNC_CBO_TWO_HYPH",
.matching_pmu = "uncore_cbox_0",
};
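The expected event and alias strings above change from umask-first to event-first because the terms are now emitted in sorted key order. A standalone sketch of sorting comma-separated key=value terms by key, which reproduces the new ordering (the helper names here are illustrative, not perf APIs):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int cmp_terms(const void *a, const void *b)
    {
            return strcmp(*(const char * const *)a, *(const char * const *)b);
    }

    int main(void)
    {
            char buf[] = "umask=0x80,period=200000,event=0x6";
            char *terms[8];
            int n = 0;

            for (char *t = strtok(buf, ","); t && n < 8; t = strtok(NULL, ","))
                    terms[n++] = t;

            qsort(terms, n, sizeof(terms[0]), cmp_terms);

            /* prints: event=0x6,period=200000,umask=0x80 */
            for (int i = 0; i < n; i++)
                    printf("%s%s", i ? "," : "", terms[i]);
            printf("\n");
            return 0;
    }
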
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
new file mode 100755
index 000000000000..00c7285ce1ac
--- /dev/null
+++ b/tools/perf/tests/shell/record.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+# perf record tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ trap - exit term int
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup exit term int
+
+test_per_thread() {
+ echo "Basic --per-thread mode test"
+ if ! perf record -e instructions:u -o ${perfdata} --quiet true 2> /dev/null
+ then
+ echo "Per-thread record [Skipped instructions:u not supported]"
+ if [ $err -ne 1 ]
+ then
+ err=2
+ fi
+ return
+ fi
+ if ! perf record -e instructions:u --per-thread -o ${perfdata} true 2> /dev/null
+ then
+ echo "Per-thread record of instructions:u [Failed]"
+ err=1
+ return
+ fi
+ if ! perf report -i ${perfdata} -q | egrep -q true
+ then
+ echo "Per-thread record [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Basic --per-thread mode test [Success]"
+}
+
+test_register_capture() {
+ echo "Register capture test"
+ if ! perf list | egrep -q 'br_inst_retired.near_call'
+ then
+ echo "Register capture test [Skipped missing instruction]"
+ if [ $err -ne 1 ]
+ then
+ err=2
+ fi
+ return
+ fi
+ if ! perf record --intr-regs=\? 2>&1 | egrep -q 'available registers: AX BX CX DX SI DI BP SP IP FLAGS CS SS R8 R9 R10 R11 R12 R13 R14 R15'
+ then
+ echo "Register capture test [Skipped missing registers]"
+ return
+ fi
+ if ! perf record -o - --intr-regs=di,r8,dx,cx -e cpu/br_inst_retired.near_call/p \
+ -c 1000 --per-thread true 2> /dev/null \
+ | perf script -F ip,sym,iregs -i - 2> /dev/null \
+ | egrep -q "DI:"
+ then
+ echo "Register capture test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Register capture test [Success]"
+}
+
+test_per_thread
+test_register_capture
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
new file mode 100755
index 000000000000..96e0739f7478
--- /dev/null
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+# perf record offcpu profiling tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ trap - exit term int
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup exit term int
+
+test_offcpu() {
+ echo "Basic off-cpu test"
+ if [ `id -u` != 0 ]
+ then
+ echo "Basic off-cpu test [Skipped permission]"
+ err=2
+ return
+ fi
+ if perf record --off-cpu -o ${perfdata} --quiet true 2>&1 | grep BUILD_BPF_SKEL
+ then
+ echo "Basic off-cpu test [Skipped missing BPF support]"
+ err=2
+ return
+ fi
+ if ! perf record --off-cpu -e dummy -o ${perfdata} sleep 1 2> /dev/null
+ then
+ echo "Basic off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+ then
+ echo "Basic off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf report -i ${perfdata} -q --percent-limit=90 | egrep -q sleep
+ then
+ echo "Basic off-cpu test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Basic off-cpu test [Success]"
+}
+
+test_offcpu
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
new file mode 100755
index 000000000000..38c26f3ef4c1
--- /dev/null
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -0,0 +1,168 @@
+#!/bin/bash
+# perf stat CSV output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Tests various perf stat CSV output commands for the
+# correct number of fields and the CSV separator set to ','.
+
+set -e
+
+function commachecker()
+{
+ local -i cnt=0 exp=0
+
+ case "$1"
+ in "--no-args") exp=6
+ ;; "--system-wide") exp=6
+ ;; "--event") exp=6
+ ;; "--interval") exp=7
+ ;; "--per-thread") exp=7
+ ;; "--system-wide-no-aggr") exp=7
+ [ $(uname -m) = "s390x" ] && exp=6
+ ;; "--per-core") exp=8
+ ;; "--per-socket") exp=8
+ ;; "--per-node") exp=8
+ ;; "--per-die") exp=8
+ esac
+
+ while read line
+ do
+ # Check for lines beginning with Failed
+ x=${line:0:6}
+ [ "$x" = "Failed" ] && continue
+
+ # Count the number of commas
+ x=$(echo $line | tr -d -c ',')
+ cnt="${#x}"
+ # echo $line $cnt
+ [ "$cnt" -ne "$exp" ] && {
+ echo "wrong number of fields. expected $exp in $line" 1>&2
+ exit 1;
+ }
+ done
+ return 0
+}
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
+}
+
+check_no_args()
+{
+ echo -n "Checking CSV output: no args "
+ perf stat -x, true 2>&1 | commachecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking CSV output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, -a true 2>&1 | commachecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking CSV output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ echo -n "Checking CSV output: system wide no aggregation "
+ perf stat -x, -A -a --no-merge true 2>&1 | commachecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking CSV output: interval "
+ perf stat -x, -I 1000 true 2>&1 | commachecker --interval
+ echo "[Success]"
+}
+
+
+check_event()
+{
+ echo -n "Checking CSV output: event "
+ perf stat -x, -e cpu-clock true 2>&1 | commachecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking CSV output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-core -a true 2>&1 | commachecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking CSV output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-thread -a true 2>&1 | commachecker --per-thread
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking CSV output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-die -a true 2>&1 | commachecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking CSV output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-node -a true 2>&1 | commachecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking CSV output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-socket -a true 2>&1 | commachecker --per-socket
+ echo "[Success]"
+}
+
+check_no_args
+check_system_wide
+check_system_wide_no_aggr
+check_interval
+check_event
+check_per_core
+check_per_thread
+check_per_die
+check_per_node
+check_per_socket
+exit 0
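commachecker validates each CSV line by counting separators rather than parsing fields. For reference, an equivalent check in C (the expected counts mirror the shell case table; the sample line is made up for illustration):

    #include <stdio.h>
    #include <string.h>

    /* Return the number of ',' separators in one CSV line. */
    static int count_commas(const char *line)
    {
            int n = 0;

            for (; *line; line++)
                    n += (*line == ',');
            return n;
    }

    int main(void)
    {
            const char *line =
                    "1.00,msec,task-clock,1000000,100.00,1.000,CPUs utilized";
            int exp = 6;    /* e.g. the --no-args case */

            if (count_commas(line) != exp) {
                    fprintf(stderr, "wrong number of fields. expected %d in %s\n",
                            exp, line);
                    return 1;
            }
            return 0;
    }
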
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
new file mode 100755
index 000000000000..9313ef2739e0
--- /dev/null
+++ b/tools/perf/tests/shell/stat.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+# perf stat tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+test_default_stat() {
+ echo "Basic stat command test"
+ if ! perf stat true 2>&1 | egrep -q "Performance counter stats for 'true':"
+ then
+ echo "Basic stat command test [Failed]"
+ err=1
+ return
+ fi
+ echo "Basic stat command test [Success]"
+}
+
+test_stat_record_report() {
+ echo "stat record and report test"
+ if ! perf stat record -o - true | perf stat report -i - 2>&1 | \
+ egrep -q "Performance counter stats for 'pipe':"
+ then
+ echo "stat record and report test [Failed]"
+ err=1
+ return
+ fi
+ echo "stat record and report test [Success]"
+}
+
+test_topdown_groups() {
+ # Topdown events must be grouped with the slots event first. Test that
+ # parse-events reorders this.
+ echo "Topdown event group test"
+ if ! perf stat -e '{slots,topdown-retiring}' true > /dev/null 2>&1
+ then
+ echo "Topdown event group test [Skipped event parsing failed]"
+ return
+ fi
+ if perf stat -e '{slots,topdown-retiring}' true 2>&1 | egrep -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed events not supported]"
+ err=1
+ return
+ fi
+ if perf stat -e '{topdown-retiring,slots}' true 2>&1 | egrep -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed slots not reordered first]"
+ err=1
+ return
+ fi
+ echo "Topdown event group test [Success]"
+}
+
+test_topdown_weak_groups() {
+ # Weak groups break if the perf_event_open of multiple grouped events
+ # fails. Breaking a topdown group causes the events to fail. Test a very large
+ # grouping to see that the topdown events aren't broken out.
+ echo "Topdown weak groups test"
+ ok_grouping="{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring},branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,cache-misses,cache-references"
+ if ! perf stat --no-merge -e "$ok_grouping" true > /dev/null 2>&1
+ then
+ echo "Topdown weak groups test [Skipped event parsing failed]"
+ return
+ fi
+ group_needs_break="{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,cache-misses,cache-references}:W"
+ if perf stat --no-merge -e "$group_needs_break" true 2>&1 | egrep -q "<not supported>"
+ then
+ echo "Topdown weak groups test [Failed events not supported]"
+ err=1
+ return
+ fi
+ echo "Topdown weak groups test [Success]"
+}
+
+test_default_stat
+test_stat_record_report
+test_topdown_groups
+test_topdown_weak_groups
+exit $err
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index 6ffbb27afaba..ec108d45d3c6 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -43,7 +43,7 @@ CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer"
cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1
# Add a 1 second delay to skip samples that are not in the leaf() function
-perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 -- $TEST_PROGRAM 2> /dev/null &
+perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
PID=$!
echo " + Recording (PID=$PID)..."
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
new file mode 100755
index 000000000000..c920d3583d30
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -0,0 +1,92 @@
+#!/bin/sh
+# Check Arm SPE doesn't hang when there are forks
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+skip_if_no_arm_spe_event() {
+ perf list | egrep -q 'arm_spe_[0-9]+//' && return 0
+ return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+TEST_PROGRAM_SOURCE=$(mktemp /tmp/__perf_test.program.XXXXX.c)
+TEST_PROGRAM=$(mktemp /tmp/__perf_test.program.XXXXX)
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+PERF_RECORD_LOG=$(mktemp /tmp/__perf_test.log.XXXXX)
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_RECORD_LOG}
+ rm -f ${PERF_DATA}
+ rm -f ${TEST_PROGRAM_SOURCE}
+ rm -f ${TEST_PROGRAM}
+}
+
+trap cleanup_files exit term int
+
+# compile test program
+cat << EOF > $TEST_PROGRAM_SOURCE
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+int workload() {
+ while (1)
+ sqrt(rand());
+ return 0;
+}
+
+int main() {
+ switch (fork()) {
+ case 0:
+ return workload();
+ case -1:
+ return 1;
+ default:
+ wait(NULL);
+ }
+ return 0;
+}
+EOF
+
+echo "Compiling test program..."
+CFLAGS="-lm"
+cc $TEST_PROGRAM_SOURCE $CFLAGS -o $TEST_PROGRAM || exit 1
+
+echo "Recording workload..."
+perf record -o ${PERF_DATA} -e arm_spe/period=65536/ -vvv -- $TEST_PROGRAM > ${PERF_RECORD_LOG} 2>&1 &
+PERFPID=$!
+
+# Check if perf hangs by checking the perf-record logs.
+sleep 1
+log0=$(wc -l $PERF_RECORD_LOG)
+echo Log lines = $log0
+sleep 1
+log1=$(wc -l $PERF_RECORD_LOG)
+echo Log lines after 1 second = $log1
+
+kill $PERFPID
+wait $PERFPID
+# test program may leave an orphan process running the workload
+killall $(basename $TEST_PROGRAM)
+
+if [ "$log0" = "$log1" ];
+then
+ echo "SPE hang test: FAIL"
+ exit 1
+else
+ echo "SPE hang test: PASS"
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
new file mode 100755
index 000000000000..a3298643884d
--- /dev/null
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -0,0 +1,71 @@
+#!/bin/sh
+# Miscellaneous Intel PT testing
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# Skip if no Intel PT
+perf list | grep -q 'intel_pt//' || exit 2
+
+skip_cnt=0
+ok_cnt=0
+err_cnt=0
+
+tmpfile=`mktemp`
+perfdatafile=`mktemp`
+
+can_cpu_wide()
+{
+ perf record -o ${tmpfile} -B -N --no-bpf-event -e dummy:u -C $1 true 2>&1 >/dev/null || return 2
+ return 0
+}
+
+test_system_wide_side_band()
+{
+ # Need CPU 0 and CPU 1
+ can_cpu_wide 0 || return $?
+ can_cpu_wide 1 || return $?
+
+ # Record on CPU 0 a task running on CPU 1
+ perf record -B -N --no-bpf-event -o ${perfdatafile} -e intel_pt//u -C 0 -- taskset --cpu-list 1 uname
+
+ # Should get MMAP events from CPU 1 because they can be needed to decode
+ mmap_cnt=`perf script -i ${perfdatafile} --no-itrace --show-mmap-events -C 1 2>/dev/null | grep MMAP | wc -l`
+
+ if [ ${mmap_cnt} -gt 0 ] ; then
+ return 0
+ fi
+
+ echo "Failed to record MMAP events on CPU 1 when tracing CPU 0"
+ return 1
+}
+
+count_result()
+{
+ if [ $1 -eq 2 ] ; then
+ skip_cnt=`expr ${skip_cnt} \+ 1`
+ return
+ fi
+ if [ $1 -eq 0 ] ; then
+ ok_cnt=`expr ${ok_cnt} \+ 1`
+ return
+ fi
+ err_cnt=`expr ${err_cnt} \+ 1`
+}
+
+test_system_wide_side_band
+
+count_result $?
+
+rm -f ${tmpfile}
+rm -f ${perfdatafile}
+
+if [ ${err_cnt} -gt 0 ] ; then
+ exit 1
+fi
+
+if [ ${ok_cnt} -gt 0 ] ; then
+ exit 0
+fi
+
+exit 2
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index d23a9e322ff5..0b4f61b6cc6b 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -115,7 +115,7 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
* physical_package_id will be set to -1. Hence skip this
* test if physical_package_id returns -1 for cpu from perf_cpu_map.
*/
- if (strncmp(session->header.env.arch, "powerpc", 7)) {
+ if (!strncmp(session->header.env.arch, "ppc64le", 7)) {
if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1)
return TEST_SKIP;
}
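Note the inverted sense here: strncmp() returns 0 on a match, so the old condition applied the skip check on every architecture except "powerpc", while the new one applies it only when the recorded arch is "ppc64le". A two-line illustration:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            /* !strncmp(...) is true only for the matching prefix */
            assert(!strncmp("ppc64le", "ppc64le", 7));
            assert(strncmp("x86_64", "ppc64le", 7) != 0);
            return 0;
    }
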
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 93dee542a177..4fd8d703ff19 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -114,7 +114,7 @@ static bool is_ignored_symbol(const char *name, char type)
static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
- int err = -1;
+ int err = TEST_FAIL;
struct rb_node *nd;
struct symbol *sym;
struct map *kallsyms_map, *vmlinux_map, *map;
@@ -142,7 +142,8 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* and find the .ko files that match them in /lib/modules/`uname -r`/.
*/
if (machine__create_kernel_maps(&kallsyms) < 0) {
- pr_debug("machine__create_kernel_maps ");
+ pr_debug("machine__create_kernel_maps failed");
+ err = TEST_SKIP;
goto out;
}
@@ -158,7 +159,8 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* code and with the one got from /proc/modules from the "kallsyms" code.
*/
if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
- pr_debug("dso__load_kallsyms ");
+ pr_debug("machine__load_kallsyms failed");
+ err = TEST_SKIP;
goto out;
}
@@ -178,7 +180,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* Now repeat step 2, this time for the vmlinux file we'll auto-locate.
*/
if (machine__create_kernel_maps(&vmlinux) < 0) {
- pr_debug("machine__create_kernel_maps ");
+ pr_info("machine__create_kernel_maps failed");
goto out;
}
@@ -196,7 +198,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused
* to fixup the symbols.
*/
if (machine__load_vmlinux_path(&vmlinux) <= 0) {
- pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
+ pr_info("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
err = TEST_SKIP;
goto out;
}
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
index 2c5f72fa8108..37c53bac5f56 100755
--- a/tools/perf/trace/beauty/arch_errno_names.sh
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -33,23 +33,13 @@ create_errno_lookup_func()
local arch=$(arch_string "$1")
local nr name
- cat <<EoFuncBegin
-static const char *errno_to_name__$arch(int err)
-{
- switch (err) {
-EoFuncBegin
+ printf "static const char *errno_to_name__%s(int err)\n{\n\tswitch (err) {\n" $arch
while read name nr; do
printf '\tcase %d: return "%s";\n' $nr $name
done
- cat <<EoFuncEnd
- default:
- return "(unknown)";
- }
-}
-
-EoFuncEnd
+ printf '\tdefault: return "(unknown)";\n\t}\n}\n'
}
process_arch()
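The printf calls above emit the same C as the removed here-documents. Given a hypothetical input pair ("EPERM", 1), the generated lookup function would look like this (the arch suffix and errno value are illustrative):

    static const char *errno_to_name__x86(int err)
    {
            switch (err) {
            case 1: return "EPERM";
            default: return "(unknown)";
            }
    }
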
diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
index 6f85f5d957ef..17311ad9f9af 100644
--- a/tools/perf/trace/beauty/include/linux/socket.h
+++ b/tools/perf/trace/beauty/include/linux/socket.h
@@ -50,6 +50,9 @@ struct linger {
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
+
+ int msg_inq; /* output, data left in socket */
+
struct iov_iter msg_iter; /* data */
/*
@@ -62,8 +65,9 @@ struct msghdr {
void __user *msg_control_user;
};
bool msg_control_is_user : 1;
- __kernel_size_t msg_controllen; /* ancillary data buffer length */
+	bool msg_get_inq : 1; /* return INQ after receive */
unsigned int msg_flags; /* flags on received message */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
@@ -434,6 +438,7 @@ extern struct file *do_accept(struct file *file, unsigned file_flags,
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
+extern struct file *__sys_socket_file(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
int addrlen, int file_flags);
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 9a7209a99e16..a51267d88ca9 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -147,6 +147,7 @@ perf-$(CONFIG_LIBBPF) += bpf_map.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter_cgroup.o
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_ftrace.o
+perf-$(CONFIG_PERF_BPF_SKEL) += bpf_off_cpu.o
perf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
perf-$(CONFIG_LIBELF) += symbol-elf.o
perf-$(CONFIG_LIBELF) += probe-file.o
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 1a80151baed9..d040406f3314 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -387,26 +387,16 @@ static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
return arm_spe_deliver_synth_event(spe, speq, event, &sample);
}
-#define SPE_MEM_TYPE (ARM_SPE_L1D_ACCESS | ARM_SPE_L1D_MISS | \
- ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS | \
- ARM_SPE_REMOTE_ACCESS)
-
-static bool arm_spe__is_memory_event(enum arm_spe_sample_type type)
-{
- if (type & SPE_MEM_TYPE)
- return true;
-
- return false;
-}
-
static u64 arm_spe__synth_data_source(const struct arm_spe_record *record)
{
union perf_mem_data_src data_src = { 0 };
if (record->op == ARM_SPE_LD)
data_src.mem_op = PERF_MEM_OP_LOAD;
- else
+ else if (record->op == ARM_SPE_ST)
data_src.mem_op = PERF_MEM_OP_STORE;
+ else
+ return 0;
if (record->type & (ARM_SPE_LLC_ACCESS | ARM_SPE_LLC_MISS)) {
data_src.mem_lvl = PERF_MEM_LVL_L3;
@@ -510,7 +500,11 @@ static int arm_spe_sample(struct arm_spe_queue *speq)
return err;
}
- if (spe->sample_memory && arm_spe__is_memory_event(record->type)) {
+ /*
+	 * When data_src is zero, the record is not a memory operation,
+	 * so skip synthesizing a memory sample in that case.
+ */
+ if (spe->sample_memory && data_src) {
err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
if (err)
return err;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index df1c5bbbaa0d..511dd3caa1bc 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -125,7 +125,7 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
mm->tid = mp->tid;
mm->cpu = mp->cpu.cpu;
- if (!mp->len) {
+ if (!mp->len || !mp->mmap_needed) {
mm->base = NULL;
return 0;
}
@@ -168,13 +168,20 @@ void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
}
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
- struct evlist *evlist, int idx,
- bool per_cpu)
+ struct evlist *evlist,
+ struct evsel *evsel, int idx)
{
+ bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+
+ mp->mmap_needed = evsel->needs_auxtrace_mmap;
+
+ if (!mp->mmap_needed)
+ return;
+
mp->idx = idx;
if (per_cpu) {
- mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
+ mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
if (evlist->core.threads)
mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
else
@@ -636,6 +643,22 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
return -EINVAL;
}
+static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
+{
+ bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
+
+ if (per_cpu_mmaps) {
+ struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
+ int cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
+
+ if (cpu_map_idx == -1)
+ return -EINVAL;
+ return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
+ }
+
+ return perf_evsel__enable_thread(&evsel->core, idx);
+}
+
int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
struct evsel *evsel;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index dc38b6f57232..cd0d25c2751c 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -344,6 +344,10 @@ struct auxtrace_mmap {
* @idx: index of this mmap
* @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
* mmap) otherwise %0
+ * @mmap_needed: set to %false for non-auxtrace events. This is needed because
+ * auxtrace mmapping is done in the same code path as non-auxtrace
+ * mmapping but not every evsel that needs non-auxtrace mmapping
+ * also needs auxtrace mmapping.
* @cpu: cpu number for a per-cpu mmap otherwise %-1
*/
struct auxtrace_mmap_params {
@@ -353,6 +357,7 @@ struct auxtrace_mmap_params {
int prot;
int idx;
pid_t tid;
+ bool mmap_needed;
struct perf_cpu cpu;
};
@@ -490,8 +495,8 @@ void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
unsigned int auxtrace_pages,
bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
- struct evlist *evlist, int idx,
- bool per_cpu);
+ struct evlist *evlist,
+ struct evsel *evsel, int idx);
typedef int (*process_auxtrace_t)(struct perf_tool *tool,
struct mmap *map,
@@ -863,8 +868,8 @@ void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
unsigned int auxtrace_pages,
bool auxtrace_overwrite);
void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
- struct evlist *evlist, int idx,
- bool per_cpu);
+ struct evlist *evlist,
+ struct evsel *evsel, int idx);
#define ITRACE_HELP ""
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 8271ab764eb5..eee64ddb766d 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -35,11 +35,12 @@ struct btf *btf__load_from_kernel_by_id(__u32 id)
}
#endif
-int __weak bpf_prog_load(enum bpf_prog_type prog_type,
- const char *prog_name __maybe_unused,
- const char *license,
- const struct bpf_insn *insns, size_t insn_cnt,
- const struct bpf_prog_load_opts *opts)
+#ifndef HAVE_LIBBPF_BPF_PROG_LOAD
+int bpf_prog_load(enum bpf_prog_type prog_type,
+ const char *prog_name __maybe_unused,
+ const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ const struct bpf_prog_load_opts *opts)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
@@ -47,8 +48,10 @@ int __weak bpf_prog_load(enum bpf_prog_type prog_type,
opts->kern_version, opts->log_buf, opts->log_size);
#pragma GCC diagnostic pop
}
+#endif
-struct bpf_program * __weak
+#ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
+struct bpf_program *
bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
{
#pragma GCC diagnostic push
@@ -56,8 +59,10 @@ bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
return bpf_program__next(prev, obj);
#pragma GCC diagnostic pop
}
+#endif
-struct bpf_map * __weak
+#ifndef HAVE_LIBBPF_BPF_OBJECT__NEXT_MAP
+struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
#pragma GCC diagnostic push
@@ -65,8 +70,10 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
return bpf_map__next(prev, obj);
#pragma GCC diagnostic pop
}
+#endif
-const void * __weak
+#ifndef HAVE_LIBBPF_BTF__RAW_DATA
+const void *
btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
#pragma GCC diagnostic push
@@ -74,6 +81,7 @@ btf__raw_data(const struct btf *btf_ro, __u32 *size)
return btf__get_raw_data(btf_ro, size);
#pragma GCC diagnostic pop
}
+#endif
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index b72cef1ae959..d2c9b09ddb48 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -9,6 +9,7 @@
#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
+#include <linux/filter.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -49,6 +50,7 @@ struct bpf_prog_priv {
struct bpf_insn *insns_buf;
int nr_types;
int *type_mapping;
+ int *prologue_fds;
};
struct bpf_perf_object {
@@ -56,6 +58,11 @@ struct bpf_perf_object {
struct bpf_object *obj;
};
+struct bpf_preproc_result {
+ struct bpf_insn *new_insn_ptr;
+ int new_insn_cnt;
+};
+
static LIST_HEAD(bpf_objects_list);
static struct hashmap *bpf_program_hash;
static struct hashmap *bpf_map_hash;
@@ -63,20 +70,16 @@ static struct hashmap *bpf_map_hash;
static struct bpf_perf_object *
bpf_perf_object__next(struct bpf_perf_object *prev)
{
- struct bpf_perf_object *next;
-
- if (!prev)
- next = list_first_entry(&bpf_objects_list,
- struct bpf_perf_object,
- list);
- else
- next = list_next_entry(prev, list);
+ if (!prev) {
+ if (list_empty(&bpf_objects_list))
+ return NULL;
- /* Empty list is noticed here so don't need checking on entry. */
- if (&next->list == &bpf_objects_list)
+ return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
+ }
+ if (list_is_last(&prev->list, &bpf_objects_list))
return NULL;
- return next;
+ return list_next_entry(prev, list);
}
#define bpf_perf_object__for_each(perf_obj, tmp) \
@@ -86,6 +89,7 @@ bpf_perf_object__next(struct bpf_perf_object *prev)
(perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
static bool libbpf_initialized;
+static int libbpf_sec_handler;
static int bpf_perf_object__add(struct bpf_object *obj)
{
@@ -99,16 +103,90 @@ static int bpf_perf_object__add(struct bpf_object *obj)
return perf_obj ? 0 : -ENOMEM;
}
+static void *program_priv(const struct bpf_program *prog)
+{
+ void *priv;
+
+ if (IS_ERR_OR_NULL(bpf_program_hash))
+ return NULL;
+ if (!hashmap__find(bpf_program_hash, prog, &priv))
+ return NULL;
+ return priv;
+}
+
+static struct bpf_insn prologue_init_insn[] = {
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+};
+
+static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
+ struct bpf_prog_load_opts *opts __maybe_unused,
+ long cookie __maybe_unused)
+{
+ size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
+ size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
+ struct bpf_prog_priv *priv = program_priv(prog);
+ const struct bpf_insn *orig_insn;
+ struct bpf_insn *insn;
+
+ if (IS_ERR_OR_NULL(priv)) {
+ pr_debug("bpf: failed to get private field\n");
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+
+ if (!priv->need_prologue)
+ return 0;
+
+ /* prepend initialization code to program instructions */
+ orig_insn = bpf_program__insns(prog);
+ orig_insn_cnt = bpf_program__insn_cnt(prog);
+ init_size = init_size_cnt * sizeof(*insn);
+ orig_size = orig_insn_cnt * sizeof(*insn);
+
+ insn_cnt = orig_insn_cnt + init_size_cnt;
+ insn = malloc(insn_cnt * sizeof(*insn));
+ if (!insn)
+ return -ENOMEM;
+
+ memcpy(insn, prologue_init_insn, init_size);
+ memcpy((char *) insn + init_size, orig_insn, orig_size);
+ bpf_program__set_insns(prog, insn, insn_cnt);
+ return 0;
+}
+
+static int libbpf_init(void)
+{
+ LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
+ .prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
+ );
+
+ if (libbpf_initialized)
+ return 0;
+
+ libbpf_set_print(libbpf_perf_print);
+ libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
+ 0, &handler_opts);
+ if (libbpf_sec_handler < 0) {
+ pr_debug("bpf: failed to register libbpf section handler: %d\n",
+ libbpf_sec_handler);
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+ libbpf_initialized = true;
+ return 0;
+}
+
struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
struct bpf_object *obj;
+ int err;
- if (!libbpf_initialized) {
- libbpf_set_print(libbpf_perf_print);
- libbpf_initialized = true;
- }
+ err = libbpf_init();
+ if (err)
+ return ERR_PTR(err);
obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
if (IS_ERR_OR_NULL(obj)) {
@@ -135,14 +213,13 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
struct bpf_object *obj;
+ int err;
- if (!libbpf_initialized) {
- libbpf_set_print(libbpf_perf_print);
- libbpf_initialized = true;
- }
+ err = libbpf_init();
+ if (err)
+ return ERR_PTR(err);
if (source) {
- int err;
void *obj_buf;
size_t obj_buf_sz;
@@ -179,14 +256,31 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
return obj;
}
+static void close_prologue_programs(struct bpf_prog_priv *priv)
+{
+ struct perf_probe_event *pev;
+ int i, fd;
+
+ if (!priv->need_prologue)
+ return;
+ pev = &priv->pev;
+ for (i = 0; i < pev->ntevs; i++) {
+ fd = priv->prologue_fds[i];
+ if (fd != -1)
+ close(fd);
+ }
+}
+
static void
clear_prog_priv(const struct bpf_program *prog __maybe_unused,
void *_priv)
{
struct bpf_prog_priv *priv = _priv;
+ close_prologue_programs(priv);
cleanup_perf_probe_events(&priv->pev, 1);
zfree(&priv->insns_buf);
+ zfree(&priv->prologue_fds);
zfree(&priv->type_mapping);
zfree(&priv->sys_name);
zfree(&priv->evt_name);
@@ -234,17 +328,6 @@ static bool ptr_equal(const void *key1, const void *key2,
return key1 == key2;
}
-static void *program_priv(const struct bpf_program *prog)
-{
- void *priv;
-
- if (IS_ERR_OR_NULL(bpf_program_hash))
- return NULL;
- if (!hashmap__find(bpf_program_hash, prog, &priv))
- return NULL;
- return priv;
-}
-
static int program_set_priv(struct bpf_program *prog, void *priv)
{
void *old_priv;
@@ -549,8 +632,8 @@ static int bpf__prepare_probe(void)
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
- struct bpf_insn *orig_insns, int orig_insns_cnt,
- struct bpf_prog_prep_result *res)
+ const struct bpf_insn *orig_insns, int orig_insns_cnt,
+ struct bpf_preproc_result *res)
{
struct bpf_prog_priv *priv = program_priv(prog);
struct probe_trace_event *tev;
@@ -598,7 +681,6 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
res->new_insn_ptr = buf;
res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
- res->pfd = NULL;
return 0;
errout:
@@ -706,7 +788,7 @@ static int hook_load_preprocessor(struct bpf_program *prog)
struct bpf_prog_priv *priv = program_priv(prog);
struct perf_probe_event *pev;
bool need_prologue = false;
- int err, i;
+ int i;
if (IS_ERR_OR_NULL(priv)) {
pr_debug("Internal error when hook preprocessor\n");
@@ -744,6 +826,13 @@ static int hook_load_preprocessor(struct bpf_program *prog)
return -ENOMEM;
}
+ priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
+ if (!priv->prologue_fds) {
+ pr_debug("Not enough memory: alloc prologue fds failed\n");
+ return -ENOMEM;
+ }
+ memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);
+
priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
if (!priv->type_mapping) {
pr_debug("Not enough memory: alloc type_mapping failed\n");
@@ -752,13 +841,7 @@ static int hook_load_preprocessor(struct bpf_program *prog)
memset(priv->type_mapping, -1,
sizeof(int) * pev->ntevs);
- err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
- if (err)
- return err;
-
- err = bpf_program__set_prep(prog, priv->nr_types,
- preproc_gen_prologue);
- return err;
+ return map_prologue(pev, priv->type_mapping, &priv->nr_types);
}
int bpf__probe(struct bpf_object *obj)
@@ -865,6 +948,77 @@ int bpf__unprobe(struct bpf_object *obj)
return ret;
}
+static int bpf_object__load_prologue(struct bpf_object *obj)
+{
+ int init_cnt = ARRAY_SIZE(prologue_init_insn);
+ const struct bpf_insn *orig_insns;
+ struct bpf_preproc_result res;
+ struct perf_probe_event *pev;
+ struct bpf_program *prog;
+ int orig_insns_cnt;
+
+ bpf_object__for_each_program(prog, obj) {
+ struct bpf_prog_priv *priv = program_priv(prog);
+ int err, i, fd;
+
+ if (IS_ERR_OR_NULL(priv)) {
+ pr_debug("bpf: failed to get private field\n");
+ return -BPF_LOADER_ERRNO__INTERNAL;
+ }
+
+ if (!priv->need_prologue)
+ continue;
+
+ /*
+		 * For each program that needs a prologue we do the following:
+ *
+ * - take its current instructions and use them
+ * to generate the new code with prologue
+ * - load new instructions with bpf_prog_load
+ * and keep the fd in prologue_fds
+ * - new fd will be used in bpf__foreach_event
+ * to connect this program with perf evsel
+ */
+ orig_insns = bpf_program__insns(prog);
+ orig_insns_cnt = bpf_program__insn_cnt(prog);
+
+ pev = &priv->pev;
+ for (i = 0; i < pev->ntevs; i++) {
+ /*
+			 * Skip the artificial prologue_init_insn instructions
+			 * (init_cnt), so the real prologue can be generated in
+			 * their place.
+ */
+ err = preproc_gen_prologue(prog, i,
+ orig_insns + init_cnt,
+ orig_insns_cnt - init_cnt,
+ &res);
+ if (err)
+ return err;
+
+ fd = bpf_prog_load(bpf_program__get_type(prog),
+ bpf_program__name(prog), "GPL",
+ res.new_insn_ptr,
+ res.new_insn_cnt, NULL);
+ if (fd < 0) {
+ char bf[128];
+
+ libbpf_strerror(-errno, bf, sizeof(bf));
+ pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
+ -errno, bf);
+ return -errno;
+ }
+ priv->prologue_fds[i] = fd;
+ }
+ /*
+ * We no longer need the original program,
+ * we can unload it.
+ */
+ bpf_program__unload(prog);
+ }
+ return 0;
+}
+
int bpf__load(struct bpf_object *obj)
{
int err;
@@ -876,7 +1030,7 @@ int bpf__load(struct bpf_object *obj)
pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
return err;
}
- return 0;
+ return bpf_object__load_prologue(obj);
}
int bpf__foreach_event(struct bpf_object *obj,
@@ -911,13 +1065,10 @@ int bpf__foreach_event(struct bpf_object *obj,
for (i = 0; i < pev->ntevs; i++) {
tev = &pev->tevs[i];
- if (priv->need_prologue) {
- int type = priv->type_mapping[i];
-
- fd = bpf_program__nth_fd(prog, type);
- } else {
+ if (priv->need_prologue)
+ fd = priv->prologue_fds[i];
+ else
fd = bpf_program__fd(prog);
- }
if (fd < 0) {
pr_debug("bpf: failed to get file descriptor\n");
diff --git a/tools/perf/util/bpf-utils.c b/tools/perf/util/bpf-utils.c
index e271e05e51bc..80b1d2b3729b 100644
--- a/tools/perf/util/bpf-utils.c
+++ b/tools/perf/util/bpf-utils.c
@@ -149,11 +149,10 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
- data_len += count * size;
+ data_len += roundup(count * size, sizeof(__u64));
}
/* step 3: allocate continuous memory */
- data_len = roundup(data_len, sizeof(__u64));
info_linear = malloc(sizeof(struct perf_bpil) + data_len);
if (!info_linear)
return ERR_PTR(-ENOMEM);
@@ -180,7 +179,7 @@ get_bpf_prog_info_linear(int fd, __u64 arrays)
bpf_prog_info_set_offset_u64(&info_linear->info,
desc->array_offset,
ptr_to_u64(ptr));
- ptr += count * size;
+ ptr += roundup(count * size, sizeof(__u64));
}
/* step 5: call syscall again to get required arrays */
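Rounding only the final total (the removed line) under-aligns every array after one whose count * size is not a multiple of 8; the fix rounds each array's slice up to u64 alignment before adding it. A quick sketch of the offset arithmetic under that scheme (roundup written out as in the kernel's macro):

    #include <stdio.h>

    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
            /* two arrays: 3 records of 12 bytes, then 2 records of 8 bytes */
            unsigned long off0 = 0;
            unsigned long off1 = off0 + roundup(3UL * 12, sizeof(unsigned long long));

            /* off1 is 40, not 36, so the second array stays 8-byte aligned */
            printf("%lu %lu\n", off0, off1);
            return 0;
    }
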
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 3ce8d03cb7ec..ef1c15e4aeba 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -224,25 +224,25 @@ static int bpf_program_profiler__disable(struct evsel *evsel)
static int bpf_program_profiler__read(struct evsel *evsel)
{
- // perf_cpu_map uses /sys/devices/system/cpu/online
- int num_cpu = evsel__nr_cpus(evsel);
// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
// Sometimes possible > online, like on a Ryzen 3900X that has 24
// threads but its possible showed 0-31 -acme
int num_cpu_bpf = libbpf_num_possible_cpus();
struct bpf_perf_event_value values[num_cpu_bpf];
struct bpf_counter *counter;
+ struct perf_counts_values *counts;
int reading_map_fd;
__u32 key = 0;
- int err, cpu;
+ int err, idx, bpf_cpu;
if (list_empty(&evsel->bpf_counter_list))
return -EAGAIN;
- for (cpu = 0; cpu < num_cpu; cpu++) {
- perf_counts(evsel->counts, cpu, 0)->val = 0;
- perf_counts(evsel->counts, cpu, 0)->ena = 0;
- perf_counts(evsel->counts, cpu, 0)->run = 0;
+ perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
+ counts = perf_counts(evsel->counts, idx, 0);
+ counts->val = 0;
+ counts->ena = 0;
+ counts->run = 0;
}
list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
struct bpf_prog_profiler_bpf *skel = counter->skel;
@@ -256,10 +256,15 @@ static int bpf_program_profiler__read(struct evsel *evsel)
return err;
}
- for (cpu = 0; cpu < num_cpu; cpu++) {
- perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
- perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
- perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
+ for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
+ idx = perf_cpu_map__idx(evsel__cpus(evsel),
+ (struct perf_cpu){.cpu = bpf_cpu});
+ if (idx == -1)
+ continue;
+ counts = perf_counts(evsel->counts, idx, 0);
+ counts->val += values[bpf_cpu].counter;
+ counts->ena += values[bpf_cpu].enabled;
+ counts->run += values[bpf_cpu].running;
}
}
return 0;
@@ -307,7 +312,10 @@ static bool bperf_attr_map_compatible(int attr_map_fd)
(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}
-int __weak
+#ifndef HAVE_LIBBPF_BPF_MAP_CREATE
+LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
+ int value_size, int max_entries, __u32 map_flags);
+int
bpf_map_create(enum bpf_map_type map_type,
const char *map_name __maybe_unused,
__u32 key_size,
@@ -320,6 +328,7 @@ bpf_map_create(enum bpf_map_type map_type,
return bpf_create_map(map_type, key_size, value_size, max_entries, 0);
#pragma GCC diagnostic pop
}
+#endif
static int bperf_lock_attr_map(struct target *target)
{
@@ -621,6 +630,7 @@ static int bperf__read(struct evsel *evsel)
struct bperf_follower_bpf *skel = evsel->follower_skel;
__u32 num_cpu_bpf = cpu__max_cpu().cpu;
struct bpf_perf_event_value values[num_cpu_bpf];
+ struct perf_counts_values *counts;
int reading_map_fd, err = 0;
__u32 i;
int j;
@@ -639,29 +649,32 @@ static int bperf__read(struct evsel *evsel)
case BPERF_FILTER_GLOBAL:
assert(i == 0);
- perf_cpu_map__for_each_cpu(entry, j, all_cpu_map) {
- cpu = entry.cpu;
- perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
- perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
- perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
+ perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
+ counts = perf_counts(evsel->counts, j, 0);
+ counts->val = values[entry.cpu].counter;
+ counts->ena = values[entry.cpu].enabled;
+ counts->run = values[entry.cpu].running;
}
break;
case BPERF_FILTER_CPU:
- cpu = evsel->core.cpus->map[i].cpu;
- perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
- perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
- perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
+ cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
+ assert(cpu >= 0);
+ counts = perf_counts(evsel->counts, i, 0);
+ counts->val = values[cpu].counter;
+ counts->ena = values[cpu].enabled;
+ counts->run = values[cpu].running;
break;
case BPERF_FILTER_PID:
case BPERF_FILTER_TGID:
- perf_counts(evsel->counts, 0, i)->val = 0;
- perf_counts(evsel->counts, 0, i)->ena = 0;
- perf_counts(evsel->counts, 0, i)->run = 0;
+ counts = perf_counts(evsel->counts, 0, i);
+ counts->val = 0;
+ counts->ena = 0;
+ counts->run = 0;
for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
- perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
- perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
- perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
+ counts->val += values[cpu].counter;
+ counts->ena += values[cpu].enabled;
+ counts->run += values[cpu].running;
}
break;
default:
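BPF per-cpu maps are indexed by possible CPU number, while evsel->counts is indexed by position in the evsel's cpu map; the rewrite above converts between the two instead of assuming they coincide. A simplified model of that translation, with plain arrays standing in for perf's cpu-map API:

    #include <stdio.h>

    /* Index of 'cpu' within 'map', or -1 if the evsel doesn't cover it. */
    static int cpu_map_idx(const int *map, int nr, int cpu)
    {
            for (int i = 0; i < nr; i++)
                    if (map[i] == cpu)
                            return i;
            return -1;
    }

    int main(void)
    {
            int map[] = { 2, 5 };           /* evsel opened on CPUs 2 and 5 */
            long bpf_vals[8] = { 0, 0, 10, 0, 0, 20, 0, 0 };
            long counts[2] = { 0, 0 };

            for (int cpu = 0; cpu < 8; cpu++) {
                    int idx = cpu_map_idx(map, 2, cpu);

                    if (idx == -1)
                            continue;       /* CPU not covered by this evsel */
                    counts[idx] += bpf_vals[cpu];
            }
            printf("%ld %ld\n", counts[0], counts[1]);  /* 10 20 */
            return 0;
    }
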
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index ac60c08e8e2a..63b9db657442 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -46,8 +46,8 @@ static int bperf_load_program(struct evlist *evlist)
struct bpf_link *link;
struct evsel *evsel;
struct cgroup *cgrp, *leader_cgrp;
- __u32 i, cpu;
- __u32 nr_cpus = evlist->core.all_cpus->nr;
+ int i, j;
+ struct perf_cpu cpu;
int total_cpus = cpu__max_cpu().cpu;
int map_size, map_fd;
int prog_fd, err;
@@ -93,9 +93,9 @@ static int bperf_load_program(struct evlist *evlist)
goto out;
}
- for (i = 0; i < nr_cpus; i++) {
+ perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
- FD(cgrp_switch, i));
+ FD(cgrp_switch, cpu.cpu));
if (IS_ERR(link)) {
pr_err("Failed to attach cgroup program\n");
err = PTR_ERR(link);
@@ -122,10 +122,9 @@ static int bperf_load_program(struct evlist *evlist)
}
map_fd = bpf_map__fd(skel->maps.events);
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- int fd = FD(evsel, cpu);
- __u32 idx = evsel->core.idx * total_cpus +
- evlist->core.all_cpus->map[cpu].cpu;
+ perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
+ int fd = FD(evsel, cpu.cpu);
+ __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
err = bpf_map_update_elem(map_fd, &idx, &fd,
BPF_ANY);
@@ -207,14 +206,12 @@ static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
*/
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
- int i, cpu;
- int nr_cpus = evlist->core.all_cpus->nr;
+ struct perf_cpu cpu;
+ int idx;
int prog_fd = bpf_program__fd(skel->progs.trigger_read);
- for (i = 0; i < nr_cpus; i++) {
- cpu = evlist->core.all_cpus->map[i].cpu;
- bperf_trigger_reading(prog_fd, cpu);
- }
+ perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
+ bperf_trigger_reading(prog_fd, cpu.cpu);
return 0;
}
@@ -244,12 +241,10 @@ static int bperf_cgrp__disable(struct evsel *evsel)
static int bperf_cgrp__read(struct evsel *evsel)
{
struct evlist *evlist = evsel->evlist;
- int i, cpu, nr_cpus = evlist->core.all_cpus->nr;
int total_cpus = cpu__max_cpu().cpu;
struct perf_counts_values *counts;
struct bpf_perf_event_value *values;
int reading_map_fd, err = 0;
- __u32 idx;
if (evsel->core.idx)
return 0;
@@ -263,7 +258,10 @@ static int bperf_cgrp__read(struct evsel *evsel)
reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
evlist__for_each_entry(evlist, evsel) {
- idx = evsel->core.idx;
+ __u32 idx = evsel->core.idx;
+ int i;
+ struct perf_cpu cpu;
+
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
if (err) {
pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
@@ -271,13 +269,11 @@ static int bperf_cgrp__read(struct evsel *evsel)
goto out;
}
- for (i = 0; i < nr_cpus; i++) {
- cpu = evlist->core.all_cpus->map[i].cpu;
-
+ perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
counts = perf_counts(evsel->counts, i, 0);
- counts->val = values[cpu].counter;
- counts->ena = values[cpu].enabled;
- counts->run = values[cpu].running;
+ counts->val = values[cpu.cpu].counter;
+ counts->ena = values[cpu.cpu].enabled;
+ counts->run = values[cpu.cpu].running;
}
}
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
new file mode 100644
index 000000000000..f289b7713598
--- /dev/null
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/bpf_counter.h"
+#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/off_cpu.h"
+#include "util/perf-hooks.h"
+#include "util/record.h"
+#include "util/session.h"
+#include "util/target.h"
+#include "util/cpumap.h"
+#include "util/thread_map.h"
+#include "util/cgroup.h"
+#include <bpf/bpf.h>
+
+#include "bpf_skel/off_cpu.skel.h"
+
+#define MAX_STACKS 32
+/* we don't need an actual timestamp, we just want the samples to sort last */
+#define OFF_CPU_TIMESTAMP (~0ull << 32)
+
+static struct off_cpu_bpf *skel;
+
+struct off_cpu_key {
+ u32 pid;
+ u32 tgid;
+ u32 stack_id;
+ u32 state;
+ u64 cgroup_id;
+};
+
+union off_cpu_data {
+ struct perf_event_header hdr;
+ u64 array[1024 / sizeof(u64)];
+};
+
+static int off_cpu_config(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_BPF_OUTPUT,
+ .size = sizeof(attr), /* to capture ABI version */
+ };
+ char *evname = strdup(OFFCPU_EVENT);
+
+ if (evname == NULL)
+ return -ENOMEM;
+
+ evsel = evsel__new(&attr);
+ if (!evsel) {
+ free(evname);
+ return -ENOMEM;
+ }
+
+ evsel->core.attr.freq = 1;
+ evsel->core.attr.sample_period = 1;
+ /* off-cpu analysis depends on stack traces */
+ evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;
+
+ evlist__add(evlist, evsel);
+
+ free(evsel->name);
+ evsel->name = evname;
+
+ return 0;
+}
+
+static void off_cpu_start(void *arg)
+{
+ struct evlist *evlist = arg;
+
+ /* update task filter for the given workload */
+ if (!skel->bss->has_cpu && !skel->bss->has_task &&
+ perf_thread_map__pid(evlist->core.threads, 0) != -1) {
+ int fd;
+ u32 pid;
+ u8 val = 1;
+
+ skel->bss->has_task = 1;
+ fd = bpf_map__fd(skel->maps.task_filter);
+ pid = perf_thread_map__pid(evlist->core.threads, 0);
+ bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+ }
+
+ skel->bss->enabled = 1;
+}
+
+static void off_cpu_finish(void *arg __maybe_unused)
+{
+ skel->bss->enabled = 0;
+ off_cpu_bpf__destroy(skel);
+}
+
+/* the v5.18 kernel added a prev_state arg, so check the tracepoint signature */
+static void check_sched_switch_args(void)
+{
+ const struct btf *btf = bpf_object__btf(skel->obj);
+ const struct btf_type *t1, *t2, *t3;
+ u32 type_id;
+
+ type_id = btf__find_by_name_kind(btf, "bpf_trace_sched_switch",
+ BTF_KIND_TYPEDEF);
+ if ((s32)type_id < 0)
+ return;
+
+ t1 = btf__type_by_id(btf, type_id);
+ if (t1 == NULL)
+ return;
+
+ t2 = btf__type_by_id(btf, t1->type);
+ if (t2 == NULL || !btf_is_ptr(t2))
+ return;
+
+ t3 = btf__type_by_id(btf, t2->type);
+ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
+ /* new format: pass prev_state as 4th arg */
+ skel->rodata->has_prev_state = true;
+ }
+}
+
+int off_cpu_prepare(struct evlist *evlist, struct target *target,
+ struct record_opts *opts)
+{
+ int err, fd, i;
+ int ncpus = 1, ntasks = 1, ncgrps = 1;
+
+ if (off_cpu_config(evlist) < 0) {
+ pr_err("Failed to configure off-cpu BPF event\n");
+ return -1;
+ }
+
+ skel = off_cpu_bpf__open();
+ if (!skel) {
+ pr_err("Failed to open off-cpu BPF skeleton\n");
+ return -1;
+ }
+
+ /* don't need to set cpu filter for system-wide mode */
+ if (target->cpu_list) {
+ ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
+ bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
+ }
+
+ if (target__has_task(target)) {
+ ntasks = perf_thread_map__nr(evlist->core.threads);
+ bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
+ }
+
+ if (evlist__first(evlist)->cgrp) {
+ ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
+ bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
+
+ if (!cgroup_is_v2("perf_event"))
+ skel->rodata->uses_cgroup_v1 = true;
+ }
+
+ if (opts->record_cgroup) {
+ skel->rodata->needs_cgroup = true;
+
+ if (!cgroup_is_v2("perf_event"))
+ skel->rodata->uses_cgroup_v1 = true;
+ }
+
+ set_max_rlimit();
+ check_sched_switch_args();
+
+ err = off_cpu_bpf__load(skel);
+ if (err) {
+ pr_err("Failed to load off-cpu skeleton\n");
+ goto out;
+ }
+
+ if (target->cpu_list) {
+ u32 cpu;
+ u8 val = 1;
+
+ skel->bss->has_cpu = 1;
+ fd = bpf_map__fd(skel->maps.cpu_filter);
+
+ for (i = 0; i < ncpus; i++) {
+ cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
+ bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
+ }
+ }
+
+ if (target__has_task(target)) {
+ u32 pid;
+ u8 val = 1;
+
+ skel->bss->has_task = 1;
+ fd = bpf_map__fd(skel->maps.task_filter);
+
+ for (i = 0; i < ntasks; i++) {
+ pid = perf_thread_map__pid(evlist->core.threads, i);
+ bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
+ }
+ }
+
+ if (evlist__first(evlist)->cgrp) {
+ struct evsel *evsel;
+ u8 val = 1;
+
+ skel->bss->has_cgroup = 1;
+ fd = bpf_map__fd(skel->maps.cgroup_filter);
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct cgroup *cgrp = evsel->cgrp;
+
+ if (cgrp == NULL)
+ continue;
+
+ if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
+ pr_err("Failed to read cgroup id of %s\n",
+ cgrp->name);
+ goto out;
+ }
+
+ bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
+ }
+ }
+
+ err = off_cpu_bpf__attach(skel);
+ if (err) {
+ pr_err("Failed to attach off-cpu BPF skeleton\n");
+ goto out;
+ }
+
+ if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
+ perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
+ pr_err("Failed to attach off-cpu skeleton\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ off_cpu_bpf__destroy(skel);
+ return -1;
+}
+
+int off_cpu_write(struct perf_session *session)
+{
+ int bytes = 0, size;
+ int fd, stack;
+ u64 sample_type, val, sid = 0;
+ struct evsel *evsel;
+ struct perf_data_file *file = &session->data->file;
+ struct off_cpu_key prev, key;
+ union off_cpu_data data = {
+ .hdr = {
+ .type = PERF_RECORD_SAMPLE,
+ .misc = PERF_RECORD_MISC_USER,
+ },
+ };
+ u64 tstamp = OFF_CPU_TIMESTAMP;
+
+ skel->bss->enabled = 0;
+
+ evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
+ if (evsel == NULL) {
+ pr_err("%s evsel not found\n", OFFCPU_EVENT);
+ return 0;
+ }
+
+ sample_type = evsel->core.attr.sample_type;
+
+ if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
+ pr_err("unsupported sample type: %llx\n",
+ (unsigned long long)sample_type);
+ return -1;
+ }
+
+ if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
+ if (evsel->core.id)
+ sid = evsel->core.id[0];
+ }
+
+ fd = bpf_map__fd(skel->maps.off_cpu);
+ stack = bpf_map__fd(skel->maps.stacks);
+ memset(&prev, 0, sizeof(prev));
+
+ while (!bpf_map_get_next_key(fd, &prev, &key)) {
+ int n = 1; /* start from perf_event_header */
+ int ip_pos = -1;
+
+ bpf_map_lookup_elem(fd, &key, &val);
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ data.array[n++] = sid;
+ if (sample_type & PERF_SAMPLE_IP) {
+ ip_pos = n;
+ data.array[n++] = 0; /* will be updated */
+ }
+ if (sample_type & PERF_SAMPLE_TID)
+ data.array[n++] = (u64)key.pid << 32 | key.tgid;
+ if (sample_type & PERF_SAMPLE_TIME)
+ data.array[n++] = tstamp;
+ if (sample_type & PERF_SAMPLE_ID)
+ data.array[n++] = sid;
+ if (sample_type & PERF_SAMPLE_CPU)
+ data.array[n++] = 0;
+ if (sample_type & PERF_SAMPLE_PERIOD)
+ data.array[n++] = val;
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ int len = 0;
+
+ /* data.array[n] is callchain->nr (updated later) */
+ data.array[n + 1] = PERF_CONTEXT_USER;
+ data.array[n + 2] = 0;
+
+ bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
+ while (data.array[n + 2 + len])
+ len++;
+
+ /* update length of callchain */
+ data.array[n] = len + 1;
+
+ /* update sample ip with the first callchain entry */
+ if (ip_pos >= 0)
+ data.array[ip_pos] = data.array[n + 2];
+
+ /* calculate sample callchain data array length */
+ n += len + 2;
+ }
+ if (sample_type & PERF_SAMPLE_CGROUP)
+ data.array[n++] = key.cgroup_id;
+
+ size = n * sizeof(u64);
+ data.hdr.size = size;
+ bytes += size;
+
+ if (perf_data_file__write(file, &data, size) < 0) {
+ pr_err("failed to write perf data, error: %m\n");
+ return bytes;
+ }
+
+ prev = key;
+ /* increase dummy timestamp to sort later samples */
+ tstamp++;
+ }
+ return bytes;
+}
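To make the record construction in off_cpu_write() concrete, here is the slot layout of one synthesized sample, assuming the final sample_type works out to TID | TIME | PERIOD | CALLCHAIN (off_cpu_config() requests CALLCHAIN, perf record commonly adds the rest, and evsel__config() masks the set to OFFCPU_SAMPLE_TYPES). This is an illustration, not captured output:

    /* data.array[] slots (u64 each) for one PERF_RECORD_SAMPLE: */
    array[0]            /* perf_event_header: PERF_RECORD_SAMPLE, MISC_USER, size */
    array[1]            /* PERF_SAMPLE_TID: (u64)key.pid << 32 | key.tgid */
    array[2]            /* PERF_SAMPLE_TIME: OFF_CPU_TIMESTAMP + k, bumped per sample */
    array[3]            /* PERF_SAMPLE_PERIOD: val, the accumulated off-cpu nanoseconds */
    array[4]            /* PERF_SAMPLE_CALLCHAIN: nr = len + 1 */
    array[5]            /* PERF_CONTEXT_USER */
    array[6..6+len-1]   /* user stack entries copied from the stacks map */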
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
new file mode 100644
index 000000000000..cc6d7fd55118
--- /dev/null
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022 Google
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/* task->flags for off-cpu analysis */
+#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+
+/* task->state for off-cpu analysis */
+#define TASK_INTERRUPTIBLE 0x0001
+#define TASK_UNINTERRUPTIBLE 0x0002
+
+#define MAX_STACKS 32
+#define MAX_ENTRIES 102400
+
+struct tstamp_data {
+ __u32 stack_id;
+ __u32 state;
+ __u64 timestamp;
+};
+
+struct offcpu_key {
+ __u32 pid;
+ __u32 tgid;
+ __u32 stack_id;
+ __u32 state;
+ __u64 cgroup_id;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, MAX_STACKS * sizeof(__u64));
+ __uint(max_entries, MAX_ENTRIES);
+} stacks SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct tstamp_data);
+} tstamp SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct offcpu_key));
+ __uint(value_size, sizeof(__u64));
+ __uint(max_entries, MAX_ENTRIES);
+} off_cpu SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cpu_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} task_filter SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cgroup_filter SEC(".maps");
+
+/* new kernel task_struct definition */
+struct task_struct___new {
+ long __state;
+} __attribute__((preserve_access_index));
+
+/* old kernel task_struct definition */
+struct task_struct___old {
+ long state;
+} __attribute__((preserve_access_index));
+
+int enabled = 0;
+int has_cpu = 0;
+int has_task = 0;
+int has_cgroup = 0;
+
+const volatile bool has_prev_state = false;
+const volatile bool needs_cgroup = false;
+const volatile bool uses_cgroup_v1 = false;
+
+/*
+ * Older kernels called it task_struct->state; newer ones call it '__state'.
+ * Use BPF CO-RE "ignored suffix rule" to deal with it like below:
+ *
+ * https://nakryiko.com/posts/bpf-core-reference-guide/#handling-incompatible-field-and-type-changes
+ */
+static inline int get_task_state(struct task_struct *t)
+{
+ /* recast pointer to capture new type for compiler */
+ struct task_struct___new *t_new = (void *)t;
+
+ if (bpf_core_field_exists(t_new->__state)) {
+ return BPF_CORE_READ(t_new, __state);
+ } else {
+ /* recast pointer to capture old type for compiler */
+ struct task_struct___old *t_old = (void *)t;
+
+ return BPF_CORE_READ(t_old, state);
+ }
+}
+
+static inline __u64 get_cgroup_id(struct task_struct *t)
+{
+ struct cgroup *cgrp;
+
+ if (uses_cgroup_v1)
+ cgrp = BPF_CORE_READ(t, cgroups, subsys[perf_event_cgrp_id], cgroup);
+ else
+ cgrp = BPF_CORE_READ(t, cgroups, dfl_cgrp);
+
+ return BPF_CORE_READ(cgrp, kn, id);
+}
+
+static inline int can_record(struct task_struct *t, int state)
+{
+ /* kernel threads don't have a user stack */
+ if (t->flags & PF_KTHREAD)
+ return 0;
+
+ if (state != TASK_INTERRUPTIBLE &&
+ state != TASK_UNINTERRUPTIBLE)
+ return 0;
+
+ if (has_cpu) {
+ __u32 cpu = bpf_get_smp_processor_id();
+ __u8 *ok;
+
+ ok = bpf_map_lookup_elem(&cpu_filter, &cpu);
+ if (!ok)
+ return 0;
+ }
+
+ if (has_task) {
+ __u8 *ok;
+ __u32 pid = t->pid;
+
+ ok = bpf_map_lookup_elem(&task_filter, &pid);
+ if (!ok)
+ return 0;
+ }
+
+ if (has_cgroup) {
+ __u8 *ok;
+ __u64 cgrp_id = get_cgroup_id(t);
+
+ ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp_id);
+ if (!ok)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
+ struct task_struct *next, int state)
+{
+ __u64 ts;
+ __u32 stack_id;
+ struct tstamp_data *pelem;
+
+ ts = bpf_ktime_get_ns();
+
+ if (!can_record(prev, state))
+ goto next;
+
+ stack_id = bpf_get_stackid(ctx, &stacks,
+ BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK);
+
+ pelem = bpf_task_storage_get(&tstamp, prev, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!pelem)
+ goto next;
+
+ pelem->timestamp = ts;
+ pelem->state = state;
+ pelem->stack_id = stack_id;
+
+next:
+ pelem = bpf_task_storage_get(&tstamp, next, NULL, 0);
+
+ if (pelem && pelem->timestamp) {
+ struct offcpu_key key = {
+ .pid = next->pid,
+ .tgid = next->tgid,
+ .stack_id = pelem->stack_id,
+ .state = pelem->state,
+ .cgroup_id = needs_cgroup ? get_cgroup_id(next) : 0,
+ };
+ __u64 delta = ts - pelem->timestamp;
+ __u64 *total;
+
+ total = bpf_map_lookup_elem(&off_cpu, &key);
+ if (total)
+ *total += delta;
+ else
+ bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
+
+ /* prevent the timestamp from being reused later */
+ pelem->timestamp = 0;
+ }
+
+ return 0;
+}
+
+SEC("tp_btf/sched_switch")
+int on_switch(u64 *ctx)
+{
+ struct task_struct *prev, *next;
+ int prev_state;
+
+ if (!enabled)
+ return 0;
+
+ prev = (struct task_struct *)ctx[1];
+ next = (struct task_struct *)ctx[2];
+
+ if (has_prev_state)
+ prev_state = (int)ctx[3];
+ else
+ prev_state = get_task_state(prev);
+
+ return off_cpu_stat(ctx, prev, next, prev_state);
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
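For reference, the ctx[] indexing in on_switch() follows the sched_switch tracepoint prototype; the layout below reflects the v5.18 change that the user-space BTF probe (check_sched_switch_args()) tests for, and should be read as an assumption of this note rather than a guarantee:

    /*
     * tp_btf programs see the tracepoint args as a u64 ctx[] array:
     *
     *   pre-v5.18: sched_switch(bool preempt, struct task_struct *prev,
     *                           struct task_struct *next)
     *   v5.18+:    sched_switch(bool preempt, struct task_struct *prev,
     *                           struct task_struct *next,
     *                           unsigned int prev_state)
     *
     * so ctx[1] == prev and ctx[2] == next in both cases, while ctx[3] is
     * only valid as prev_state when has_prev_state was set at load time.
     */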
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 82f3d46bea70..328668f38c69 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -872,6 +872,30 @@ out_free:
return err;
}
+static int filename__read_build_id_ns(const char *filename,
+ struct build_id *bid,
+ struct nsinfo *nsi)
+{
+ struct nscookie nsc;
+ int ret;
+
+ nsinfo__mountns_enter(nsi, &nsc);
+ ret = filename__read_build_id(filename, bid);
+ nsinfo__mountns_exit(&nsc);
+
+ return ret;
+}
+
+static bool dso__build_id_mismatch(struct dso *dso, const char *name)
+{
+ struct build_id bid;
+
+ if (filename__read_build_id_ns(name, &bid, dso->nsinfo) < 0)
+ return false;
+
+ return !dso__build_id_equal(dso, &bid);
+}
+
static int dso__cache_build_id(struct dso *dso, struct machine *machine,
void *priv __maybe_unused)
{
@@ -886,6 +910,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
is_kallsyms = true;
name = machine->mmap_name;
}
+
+ if (!is_kallsyms && dso__build_id_mismatch(dso, name))
+ return 0;
+
return build_id_cache__add_b(&dso->bid, name, dso->nsinfo,
is_kallsyms, is_vdso);
}
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index a5ace2bbc28d..caabeac24c69 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -479,6 +479,20 @@ int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz)
return mkdir(buf, S_IRWXU);
}
+bool has_kcore_dir(const char *path)
+{
+ char *kcore_dir;
+ int ret;
+
+ if (asprintf(&kcore_dir, "%s/kcore_dir", path) < 0)
+ return false;
+
+ ret = access(kcore_dir, F_OK);
+
+ free(kcore_dir);
+ return !ret;
+}
+
char *perf_data__kallsyms_name(struct perf_data *data)
{
char *kallsyms_name;
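A hedged usage sketch for the new helper (the call site is hypothetical; has_kcore_dir() only tests for the directory's existence, it does not validate its contents):

    /* Sketch: only treat kernel object code as self-contained if captured. */
    if (has_kcore_dir(data->path))
            pr_debug("found kcore_dir under %s\n", data->path);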
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index c9de82af5584..7de53d6e2d7f 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdbool.h>
+#include <linux/types.h>
enum perf_data_mode {
PERF_DATA_MODE_WRITE,
@@ -98,6 +99,7 @@ void perf_data__close_dir(struct perf_data *data);
int perf_data__update_dir(struct perf_data *data);
unsigned long perf_data__size(struct perf_data *data);
int perf_data__make_kcore_dir(struct perf_data *data, char *buf, size_t buf_sz);
+bool has_kcore_dir(const char *path);
char *perf_data__kallsyms_name(struct perf_data *data);
bool is_perf_data(const char *path);
#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 3a9fd4d389b5..97047a11282b 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -196,7 +196,9 @@ struct dso {
u32 status_seen;
u64 file_size;
struct list_head open_entry;
+ u64 elf_base_addr;
u64 debug_frame_offset;
+ u64 eh_frame_hdr_addr;
u64 eh_frame_hdr_offset;
} data;
/* bpf prog information */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 6439c888ae38..0476bb3a4188 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -683,9 +683,12 @@ static bool check_address_range(struct intlist *addr_list, int addr_range,
int machine__resolve(struct machine *machine, struct addr_location *al,
struct perf_sample *sample)
{
- struct thread *thread = machine__findnew_thread(machine, sample->pid,
- sample->tid);
+ struct thread *thread;
+ if (symbol_conf.guest_code && !machine__is_host(machine))
+ thread = machine__findnew_guest_code(machine, sample->pid);
+ else
+ thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL)
return -1;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 52ea004ba01e..48af7d379d82 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -242,14 +242,20 @@ int __evlist__add_default(struct evlist *evlist, bool precise)
return 0;
}
-int evlist__add_dummy(struct evlist *evlist)
+static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
};
- struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
+
+ return evsel__new_idx(&attr, evlist->core.nr_entries);
+}
+
+int evlist__add_dummy(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__dummy_event(evlist);
if (evsel == NULL)
return -ENOMEM;
@@ -258,6 +264,51 @@ int evlist__add_dummy(struct evlist *evlist)
return 0;
}
+static void evlist__add_on_all_cpus(struct evlist *evlist, struct evsel *evsel)
+{
+ evsel->core.system_wide = true;
+
+ /*
+ * All CPUs.
+ *
+ * Note perf_event_open() does not accept CPUs that are not online, so
+ * in fact this CPU list will include only the online CPUs.
+ */
+ perf_cpu_map__put(evsel->core.own_cpus);
+ evsel->core.own_cpus = perf_cpu_map__new(NULL);
+ perf_cpu_map__put(evsel->core.cpus);
+ evsel->core.cpus = perf_cpu_map__get(evsel->core.own_cpus);
+
+ /* No threads */
+ perf_thread_map__put(evsel->core.threads);
+ evsel->core.threads = perf_thread_map__new_dummy();
+
+ evlist__add(evlist, evsel);
+}
+
+struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
+{
+ struct evsel *evsel = evlist__dummy_event(evlist);
+
+ if (!evsel)
+ return NULL;
+
+ evsel->core.attr.exclude_kernel = 1;
+ evsel->core.attr.exclude_guest = 1;
+ evsel->core.attr.exclude_hv = 1;
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = 1;
+ evsel->no_aux_samples = true;
+ evsel->name = strdup("dummy:u");
+
+ if (system_wide)
+ evlist__add_on_all_cpus(evlist, evsel);
+ else
+ evlist__add(evlist, evsel);
+
+ return evsel;
+}
+
static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
struct evsel *evsel, *n;
@@ -334,14 +385,6 @@ int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name,
return 0;
}
-static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
-{
- if (evsel->core.system_wide)
- return 1;
- else
- return perf_thread_map__nr(evlist->core.threads);
-}
-
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
struct evlist_cpu_iterator itr = {
@@ -546,48 +589,6 @@ void evlist__toggle_enable(struct evlist *evlist)
(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}
-static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu)
-{
- int thread;
- int nr_threads = evlist__nr_threads(evlist, evsel);
-
- if (!evsel->core.fd)
- return -EINVAL;
-
- for (thread = 0; thread < nr_threads; thread++) {
- int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
- if (err)
- return err;
- }
- return 0;
-}
-
-static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
-{
- int cpu;
- int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
-
- if (!evsel->core.fd)
- return -EINVAL;
-
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
- if (err)
- return err;
- }
- return 0;
-}
-
-int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
-{
- bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
-
- if (per_cpu_mmaps)
- return evlist__enable_event_cpu(evlist, evsel, idx);
-
- return evlist__enable_event_thread(evlist, evsel, idx);
-}
-
int evlist__add_pollfd(struct evlist *evlist, int fd)
{
return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
@@ -797,13 +798,15 @@ static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
+ struct perf_evsel *_evsel,
struct perf_mmap_param *_mp,
- int idx, bool per_cpu)
+ int idx)
{
struct evlist *evlist = container_of(_evlist, struct evlist, core);
struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
+ struct evsel *evsel = container_of(_evsel, struct evsel, core);
- auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
+ auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}
static struct perf_mmap*
@@ -1790,8 +1793,13 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
if (evsel__has_leader(c2, leader)) {
if (is_open && close)
perf_evsel__close(&c2->core);
- evsel__set_leader(c2, c2);
- c2->core.nr_members = 0;
+ /*
+ * We want to close all members of the group and reopen
+ * them. Some events, like Intel topdown, require being
+ * in a group, so those are kept in the group.
+ */
+ evsel__remove_from_group(c2, leader);
+
/*
* Set this for all former members of the group
* to indicate they get reopened.
@@ -1799,6 +1807,9 @@ struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *
c2->reset_group = true;
}
}
+ /* Reset the leader count if all entries were removed. */
+ if (leader->core.nr_members == 1)
+ leader->core.nr_members = 0;
return leader;
}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index a21daaa5fc1b..1bde9ccf4e7d 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -114,6 +114,11 @@ int arch_evlist__add_default_attrs(struct evlist *evlist);
struct evsel *arch_evlist__leader(struct list_head *list);
int evlist__add_dummy(struct evlist *evlist);
+struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide);
+static inline struct evsel *evlist__add_dummy_on_all_cpus(struct evlist *evlist)
+{
+ return evlist__add_aux_dummy(evlist, true);
+}
int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
evsel__sb_cb_t cb, void *data);
@@ -196,8 +201,6 @@ void evlist__toggle_enable(struct evlist *evlist);
void evlist__disable_evsel(struct evlist *evlist, char *evsel_name);
void evlist__enable_evsel(struct evlist *evlist, char *evsel_name);
-int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx);
-
void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);
int evlist__create_maps(struct evlist *evlist, struct target *target);
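A minimal usage sketch for the new helper pair (error handling elided; the evlist comes from the caller):

    /* Sketch: add a user-space-only "dummy:u" event opened on all online CPUs. */
    struct evsel *reader = evlist__add_dummy_on_all_cpus(evlist);

    if (reader == NULL)
            return -ENOMEM;
    /* reader->core.system_wide is set and its thread map is the dummy map */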
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 2a1729e7aee4..094b0a9c0bc0 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -48,6 +48,7 @@
#include "util.h"
#include "hashmap.h"
#include "pmu-hybrid.h"
+#include "off_cpu.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
@@ -59,6 +60,33 @@ struct perf_missing_features perf_missing_features;
static clockid_t clockid;
+static const char *const perf_tool_event__tool_names[PERF_TOOL_MAX] = {
+ NULL,
+ "duration_time",
+ "user_time",
+ "system_time",
+};
+
+const char *perf_tool_event__to_str(enum perf_tool_event ev)
+{
+ if (ev > PERF_TOOL_NONE && ev < PERF_TOOL_MAX)
+ return perf_tool_event__tool_names[ev];
+
+ return NULL;
+}
+
+enum perf_tool_event perf_tool_event__from_str(const char *str)
+{
+ int i;
+
+ perf_tool_event__for_each_event(i) {
+ if (!strcmp(str, perf_tool_event__tool_names[i]))
+ return i;
+ }
+ return PERF_TOOL_NONE;
+}
+
+
static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
return 0;
@@ -269,8 +297,8 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
return NULL;
evsel__init(evsel, attr, idx);
- if (evsel__is_bpf_output(evsel)) {
- evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
+ evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
evsel->core.attr.sample_period = 1;
}
@@ -382,6 +410,7 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->core.threads = perf_thread_map__get(orig->core.threads);
evsel->core.nr_members = orig->core.nr_members;
evsel->core.system_wide = orig->core.system_wide;
+ evsel->core.requires_cpu = orig->core.requires_cpu;
if (orig->name) {
evsel->name = strdup(orig->name);
@@ -486,7 +515,7 @@ out_err:
return ERR_PTR(err);
}
-const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
+const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
"cycles",
"instructions",
"cache-references",
@@ -571,7 +600,7 @@ static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
-const char *evsel__sw_names[PERF_COUNT_SW_MAX] = {
+const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = {
"cpu-clock",
"task-clock",
"page-faults",
@@ -597,6 +626,11 @@ static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
+static int evsel__tool_name(enum perf_tool_event ev, char *bf, size_t size)
+{
+ return scnprintf(bf, size, "%s", perf_tool_event__to_str(ev));
+}
+
static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
int r;
@@ -622,7 +656,7 @@ static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
-const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
+const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
{ "L1-dcache", "l1-d", "l1d", "L1-data", },
{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
{ "LLC", "L2", },
@@ -632,13 +666,13 @@ const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
{ "node", },
};
-const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
+const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
{ "load", "loads", "read", },
{ "store", "stores", "write", },
{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
-const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
+const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
{ "refs", "Reference", "ops", "access", },
{ "misses", "miss", },
};
@@ -654,7 +688,7 @@ const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_AL
* L1I : Read and prefetch only
* ITLB and BPU : Read-only
*/
-static unsigned long evsel__hw_cache_stat[C(MAX)] = {
+static const unsigned long evsel__hw_cache_stat[C(MAX)] = {
[C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
[C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
[C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
@@ -723,12 +757,6 @@ static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}
-static int evsel__tool_name(char *bf, size_t size)
-{
- int ret = scnprintf(bf, size, "duration_time");
- return ret;
-}
-
const char *evsel__name(struct evsel *evsel)
{
char bf[128];
@@ -753,8 +781,8 @@ const char *evsel__name(struct evsel *evsel)
break;
case PERF_TYPE_SOFTWARE:
- if (evsel->tool_event)
- evsel__tool_name(bf, sizeof(bf));
+ if (evsel__is_tool(evsel))
+ evsel__tool_name(evsel->tool_event, bf, sizeof(bf));
else
evsel__sw_name(evsel, bf, sizeof(bf));
break;
@@ -786,8 +814,8 @@ const char *evsel__metric_id(const struct evsel *evsel)
if (evsel->metric_id)
return evsel->metric_id;
- if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && evsel->tool_event)
- return "duration_time";
+ if (evsel__is_tool(evsel))
+ return perf_tool_event__to_str(evsel->tool_event);
return "unknown";
}
@@ -870,7 +898,7 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
"specifying a subset with --user-regs may render DWARF unwinding unreliable, "
"so the minimal registers set (IP, SP) is explicitly forced.\n");
} else {
- attr->sample_regs_user |= PERF_REGS_MASK;
+ attr->sample_regs_user |= arch__user_reg_mask();
}
attr->sample_stack_user = param->dump_size;
attr->exclude_callchain_user = 1;
@@ -1075,6 +1103,11 @@ static void evsel__set_default_freq_period(struct record_opts *opts,
}
}
+static bool evsel__is_offcpu_event(struct evsel *evsel)
+{
+ return evsel__is_bpf_output(evsel) && !strcmp(evsel->name, OFFCPU_EVENT);
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1339,6 +1372,9 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
*/
if (evsel__is_dummy_event(evsel))
evsel__reset_sample_bit(evsel, BRANCH_STACK);
+
+ if (evsel__is_offcpu_event(evsel))
+ evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
}
int evsel__set_filter(struct evsel *evsel, const char *filter)
@@ -3077,3 +3113,22 @@ int evsel__source_count(const struct evsel *evsel)
}
return count;
}
+
+bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
+{
+ return false;
+}
+
+/*
+ * Remove an event from a given group (leader).
+ * Some events, e.g., perf metrics Topdown events,
+ * must always be grouped; such events are left in place.
+ */
+void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
+{
+ if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
+ evsel__set_leader(evsel, evsel);
+ evsel->core.nr_members = 0;
+ leader->core.nr_members--;
+ }
+}
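The new tool-event helpers round-trip by construction; a quick sketch of the expected behavior (per the perf_tool_event__tool_names[] table above):

    enum perf_tool_event ev = perf_tool_event__from_str("user_time");
                                          /* ev == PERF_TOOL_USER_TIME */
    const char *s = perf_tool_event__to_str(ev);          /* "user_time" */
    ev = perf_tool_event__from_str("no-such-tool-event"); /* PERF_TOOL_NONE */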
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 041b42d33bf5..73ea48e94079 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -30,8 +30,18 @@ typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);
enum perf_tool_event {
PERF_TOOL_NONE = 0,
PERF_TOOL_DURATION_TIME = 1,
+ PERF_TOOL_USER_TIME = 2,
+ PERF_TOOL_SYSTEM_TIME = 3,
+
+ PERF_TOOL_MAX,
};
+const char *perf_tool_event__to_str(enum perf_tool_event ev);
+enum perf_tool_event perf_tool_event__from_str(const char *str);
+
+#define perf_tool_event__for_each_event(ev) \
+ for ((ev) = PERF_TOOL_DURATION_TIME; (ev) < PERF_TOOL_MAX; ev++)
+
/** struct evsel - event selector
*
* @evlist - evlist this evsel is in, if it is in one.
@@ -120,6 +130,7 @@ struct evsel {
bool merged_stat;
bool reset_group;
bool errored;
+ bool needs_auxtrace_mmap;
struct hashmap *per_pkg_mask;
int err;
struct {
@@ -253,11 +264,11 @@ static inline bool evsel__is_bpf(struct evsel *evsel)
#define EVSEL__MAX_ALIASES 8
-extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
-extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
-extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
-extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
-extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
+extern const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
+extern const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES];
+extern const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
+extern const char *const evsel__hw_names[PERF_COUNT_HW_MAX];
+extern const char *const evsel__sw_names[PERF_COUNT_SW_MAX];
extern char *evsel__bpf_counter_events;
bool evsel__match_bpf_counter_events(const char *name);
@@ -265,6 +276,11 @@ int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size
const char *evsel__name(struct evsel *evsel);
const char *evsel__metric_id(const struct evsel *evsel);
+static inline bool evsel__is_tool(const struct evsel *evsel)
+{
+ return evsel->tool_event != PERF_TOOL_NONE;
+}
+
const char *evsel__group_name(struct evsel *evsel);
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size);
@@ -483,6 +499,9 @@ bool evsel__has_leader(struct evsel *evsel, struct evsel *leader);
bool evsel__is_leader(struct evsel *evsel);
void evsel__set_leader(struct evsel *evsel, struct evsel *leader);
int evsel__source_count(const struct evsel *evsel);
+void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader);
+
+bool arch_evsel__must_be_in_group(const struct evsel *evsel);
/*
* Macro to swap the bit-field postition and size.
diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l
index 0a13eb20c814..4dc8edbfd9ce 100644
--- a/tools/perf/util/expr.l
+++ b/tools/perf/util/expr.l
@@ -91,7 +91,7 @@ static int literal(yyscan_t scanner)
}
%}
-number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)
+number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)(e-?[0-9]+)?
sch [-,=]
spec \\{sch}
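With the optional exponent suffix, metric expressions can now use scientific notation. Illustrative literals the new {number} pattern accepts (note the exponent takes an optional leading minus but no plus, so 1e+3 would not match):

    1e6       /* 1000000 */
    2.5e-3    /* 0.0025 */
    .5e2      /* 50 */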
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index 3db3293213a9..ae138afe6c56 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -38,6 +38,9 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#elif defined(__s390x__)
#define GEN_ELF_ARCH EM_S390
#define GEN_ELF_CLASS ELFCLASS64
+#elif defined(__riscv) && __riscv_xlen == 64
+#define GEN_ELF_ARCH EM_RISCV
+#define GEN_ELF_CLASS ELFCLASS64
#else
#error "unsupported architecture"
#endif
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a27132e5a5ef..6ad629db63b7 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3462,9 +3462,22 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
return 0;
}
+struct header_fw {
+ struct feat_writer fw;
+ struct feat_fd *ff;
+};
+
+static int feat_writer_cb(struct feat_writer *fw, void *buf, size_t sz)
+{
+ struct header_fw *h = container_of(fw, struct header_fw, fw);
+
+ return do_write(h->ff, buf, sz);
+}
+
static int do_write_feat(struct feat_fd *ff, int type,
struct perf_file_section **p,
- struct evlist *evlist)
+ struct evlist *evlist,
+ struct feat_copier *fc)
{
int err;
int ret = 0;
@@ -3478,7 +3491,23 @@ static int do_write_feat(struct feat_fd *ff, int type,
(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
- err = feat_ops[type].write(ff, evlist);
+ /*
+ * Hook to let perf inject copy feature sections from the input
+ * file.
+ */
+ if (fc && fc->copy) {
+ struct header_fw h = {
+ .fw.write = feat_writer_cb,
+ .ff = ff,
+ };
+
+ /* ->copy() returns 0 if the feature was not copied */
+ err = fc->copy(fc, type, &h.fw);
+ } else {
+ err = 0;
+ }
+ if (!err)
+ err = feat_ops[type].write(ff, evlist);
if (err < 0) {
pr_debug("failed to write feature %s\n", feat_ops[type].name);
@@ -3494,7 +3523,8 @@ static int do_write_feat(struct feat_fd *ff, int type,
}
static int perf_header__adds_write(struct perf_header *header,
- struct evlist *evlist, int fd)
+ struct evlist *evlist, int fd,
+ struct feat_copier *fc)
{
int nr_sections;
struct feat_fd ff;
@@ -3523,7 +3553,7 @@ static int perf_header__adds_write(struct perf_header *header,
lseek(fd, sec_start + sec_size, SEEK_SET);
for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
- if (do_write_feat(&ff, feat, &p, evlist))
+ if (do_write_feat(&ff, feat, &p, evlist, fc))
perf_header__clear_feat(header, feat);
}
@@ -3561,9 +3591,10 @@ int perf_header__write_pipe(int fd)
return 0;
}
-int perf_session__write_header(struct perf_session *session,
- struct evlist *evlist,
- int fd, bool at_exit)
+static int perf_session__do_write_header(struct perf_session *session,
+ struct evlist *evlist,
+ int fd, bool at_exit,
+ struct feat_copier *fc)
{
struct perf_file_header f_header;
struct perf_file_attr f_attr;
@@ -3615,7 +3646,7 @@ int perf_session__write_header(struct perf_session *session,
header->feat_offset = header->data_offset + header->data_size;
if (at_exit) {
- err = perf_header__adds_write(header, evlist, fd);
+ err = perf_header__adds_write(header, evlist, fd, fc);
if (err < 0)
return err;
}
@@ -3648,6 +3679,35 @@ int perf_session__write_header(struct perf_session *session,
return 0;
}
+int perf_session__write_header(struct perf_session *session,
+ struct evlist *evlist,
+ int fd, bool at_exit)
+{
+ return perf_session__do_write_header(session, evlist, fd, at_exit, NULL);
+}
+
+size_t perf_session__data_offset(const struct evlist *evlist)
+{
+ struct evsel *evsel;
+ size_t data_offset;
+
+ data_offset = sizeof(struct perf_file_header);
+ evlist__for_each_entry(evlist, evsel) {
+ data_offset += evsel->core.ids * sizeof(u64);
+ }
+ data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr);
+
+ return data_offset;
+}
+
+int perf_session__inject_header(struct perf_session *session,
+ struct evlist *evlist,
+ int fd,
+ struct feat_copier *fc)
+{
+ return perf_session__do_write_header(session, evlist, fd, true, fc);
+}
+
static int perf_header__getbuffer64(struct perf_header *header,
int fd, void *buf, size_t size)
{
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 0eb4bc29a5a4..56916dabce7b 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -121,6 +121,23 @@ int perf_session__write_header(struct perf_session *session,
int fd, bool at_exit);
int perf_header__write_pipe(int fd);
+/* feat_writer writes a feature section to output */
+struct feat_writer {
+ int (*write)(struct feat_writer *fw, void *buf, size_t sz);
+};
+
+/* feat_copier copies a feature section using feat_writer to output */
+struct feat_copier {
+ int (*copy)(struct feat_copier *fc, int feat, struct feat_writer *fw);
+};
+
+int perf_session__inject_header(struct perf_session *session,
+ struct evlist *evlist,
+ int fd,
+ struct feat_copier *fc);
+
+size_t perf_session__data_offset(const struct evlist *evlist);
+
void perf_header__set_feat(struct perf_header *header, int feat);
void perf_header__clear_feat(struct perf_header *header, int feat);
bool perf_header__has_feat(const struct perf_header *header, int feat);
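A sketch of how a caller such as perf inject might implement the copier interface; read_input_feat() and the struct perf_inject field are hypothetical, and returning 0 from ->copy() falls back to the normal feat_ops write, per do_write_feat():

    struct inject_fc {
            struct feat_copier fc;
            struct perf_inject *inject;        /* hypothetical caller state */
    };

    static int inject_copy(struct feat_copier *fc, int feat, struct feat_writer *fw)
    {
            struct inject_fc *ifc = container_of(fc, struct inject_fc, fc);
            void *buf;
            size_t sz;

            /* hypothetical: fetch feature section 'feat' from the input file */
            if (read_input_feat(ifc->inject, feat, &buf, &sz))
                    return 0;          /* not copied: regenerate via feat_ops */

            return fw->write(fw, buf, sz);
    }

    /* usage: */
    struct inject_fc h = { .fc.copy = inject_copy, .inject = inject };
    perf_session__inject_header(session, evlist, fd, &h.fc);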
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index e1d8f7504cbe..0ac860c8dd2b 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -137,6 +137,7 @@ struct intel_pt_decoder {
bool in_psb;
bool hop;
bool leap;
+ bool emulated_ptwrite;
bool vm_time_correlation;
bool vm_tm_corr_dry_run;
bool vm_tm_corr_reliable;
@@ -481,6 +482,8 @@ static int intel_pt_ext_err(int code)
return INTEL_PT_ERR_LOST;
case -ELOOP:
return INTEL_PT_ERR_NELOOP;
+ case -ECONNRESET:
+ return INTEL_PT_ERR_EPTW;
default:
return INTEL_PT_ERR_UNK;
}
@@ -497,6 +500,7 @@ static const char *intel_pt_err_msgs[] = {
[INTEL_PT_ERR_LOST] = "Lost trace data",
[INTEL_PT_ERR_UNK] = "Unknown error!",
[INTEL_PT_ERR_NELOOP] = "Never-ending loop (refer perf config intel-pt.max-loops)",
+ [INTEL_PT_ERR_EPTW] = "Broken emulated ptwrite",
};
int intel_pt__strerror(int code, char *buf, size_t buflen)
@@ -1535,17 +1539,108 @@ static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
return intel_pt_bug(decoder);
}
+struct eptw_data {
+ int bit_countdown;
+ uint64_t payload;
+};
+
+static int intel_pt_eptw_lookahead_cb(struct intel_pt_pkt_info *pkt_info)
+{
+ struct eptw_data *data = pkt_info->data;
+ int nr_bits;
+
+ switch (pkt_info->packet.type) {
+ case INTEL_PT_PAD:
+ case INTEL_PT_MNT:
+ case INTEL_PT_MODE_EXEC:
+ case INTEL_PT_MODE_TSX:
+ case INTEL_PT_MTC:
+ case INTEL_PT_FUP:
+ case INTEL_PT_CYC:
+ case INTEL_PT_CBR:
+ case INTEL_PT_TSC:
+ case INTEL_PT_TMA:
+ case INTEL_PT_PIP:
+ case INTEL_PT_VMCS:
+ case INTEL_PT_PSB:
+ case INTEL_PT_PSBEND:
+ case INTEL_PT_PTWRITE:
+ case INTEL_PT_PTWRITE_IP:
+ case INTEL_PT_EXSTOP:
+ case INTEL_PT_EXSTOP_IP:
+ case INTEL_PT_MWAIT:
+ case INTEL_PT_PWRE:
+ case INTEL_PT_PWRX:
+ case INTEL_PT_BBP:
+ case INTEL_PT_BIP:
+ case INTEL_PT_BEP:
+ case INTEL_PT_BEP_IP:
+ case INTEL_PT_CFE:
+ case INTEL_PT_CFE_IP:
+ case INTEL_PT_EVD:
+ break;
+
+ case INTEL_PT_TNT:
+ nr_bits = data->bit_countdown;
+ if (nr_bits > pkt_info->packet.count)
+ nr_bits = pkt_info->packet.count;
+ data->payload <<= nr_bits;
+ data->payload |= pkt_info->packet.payload >> (64 - nr_bits);
+ data->bit_countdown -= nr_bits;
+ return !data->bit_countdown;
+
+ case INTEL_PT_TIP_PGE:
+ case INTEL_PT_TIP_PGD:
+ case INTEL_PT_TIP:
+ case INTEL_PT_BAD:
+ case INTEL_PT_OVF:
+ case INTEL_PT_TRACESTOP:
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static int intel_pt_emulated_ptwrite(struct intel_pt_decoder *decoder)
+{
+ int n = 64 - decoder->tnt.count;
+ struct eptw_data data = {
+ .bit_countdown = n,
+ .payload = decoder->tnt.payload >> n,
+ };
+
+ decoder->emulated_ptwrite = false;
+ intel_pt_log("Emulated ptwrite detected\n");
+
+ intel_pt_pkt_lookahead(decoder, intel_pt_eptw_lookahead_cb, &data);
+ if (data.bit_countdown)
+ return -ECONNRESET;
+
+ decoder->state.type = INTEL_PT_PTW;
+ decoder->state.from_ip = decoder->ip;
+ decoder->state.to_ip = 0;
+ decoder->state.ptw_payload = data.payload;
+ return 0;
+}
+
static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
int err;
while (1) {
+ if (decoder->emulated_ptwrite)
+ return intel_pt_emulated_ptwrite(decoder);
err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
- if (err == INTEL_PT_RETURN)
+ if (err == INTEL_PT_RETURN) {
+ decoder->emulated_ptwrite = intel_pt_insn.emulated_ptwrite;
return 0;
- if (err)
+ }
+ if (err) {
+ decoder->emulated_ptwrite = false;
return err;
+ }
if (intel_pt_insn.op == INTEL_PT_OP_RET) {
if (!decoder->return_compression) {
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index efb2cb3ae0ca..c773028df80e 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -58,6 +58,7 @@ enum {
INTEL_PT_ERR_LOST,
INTEL_PT_ERR_UNK,
INTEL_PT_ERR_NELOOP,
+ INTEL_PT_ERR_EPTW,
INTEL_PT_ERR_MAX,
};
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index 9d5e65cec89b..1376077183f7 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -32,6 +32,7 @@ static void intel_pt_insn_decoder(struct insn *insn,
int ext;
intel_pt_insn->rel = 0;
+ intel_pt_insn->emulated_ptwrite = false;
if (insn_is_avx(insn)) {
intel_pt_insn->op = INTEL_PT_OP_OTHER;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
index c2861cfdd768..e3338b56a75f 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
@@ -37,6 +37,7 @@ enum intel_pt_insn_branch {
struct intel_pt_insn {
enum intel_pt_insn_op op;
enum intel_pt_insn_branch branch;
+ bool emulated_ptwrite;
int length;
int32_t rel;
unsigned char buf[INTEL_PT_INSN_BUF_SZ];
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index ec43d364d0de..62b2f375a94d 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -192,6 +192,7 @@ struct intel_pt_queue {
pid_t next_tid;
struct thread *thread;
struct machine *guest_machine;
+ struct thread *guest_thread;
struct thread *unknown_guest_thread;
pid_t guest_machine_pid;
bool exclude_kernel;
@@ -530,6 +531,7 @@ struct intel_pt_cache_entry {
u64 byte_cnt;
enum intel_pt_insn_op op;
enum intel_pt_insn_branch branch;
+ bool emulated_ptwrite;
int length;
int32_t rel;
char insn[INTEL_PT_INSN_BUF_SZ];
@@ -616,6 +618,7 @@ static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
e->byte_cnt = byte_cnt;
e->op = intel_pt_insn->op;
e->branch = intel_pt_insn->branch;
+ e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite;
e->length = intel_pt_insn->length;
e->rel = intel_pt_insn->rel;
memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
@@ -688,6 +691,11 @@ static int intel_pt_get_guest(struct intel_pt_queue *ptq)
ptq->guest_machine = NULL;
thread__zput(ptq->unknown_guest_thread);
+ if (symbol_conf.guest_code) {
+ thread__zput(ptq->guest_thread);
+ ptq->guest_thread = machines__findnew_guest_code(machines, pid);
+ }
+
machine = machines__find_guest(machines, pid);
if (!machine)
return -1;
@@ -702,6 +710,28 @@ static int intel_pt_get_guest(struct intel_pt_queue *ptq)
return 0;
}
+static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn)
+{
+ return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL;
+}
+
+#define PTWRITE_MAGIC "\x0f\x0bperf,ptwrite "
+#define PTWRITE_MAGIC_LEN 16
+
+static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset)
+{
+ unsigned char buf[PTWRITE_MAGIC_LEN];
+ ssize_t len;
+
+ len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN);
+ if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) {
+ intel_pt_log("Emulated ptwrite signature found\n");
+ return true;
+ }
+ intel_pt_log("Emulated ptwrite signature not found\n");
+ return false;
+}
+
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
uint64_t *insn_cnt_ptr, uint64_t *ip,
uint64_t to_ip, uint64_t max_insn_cnt,
@@ -729,11 +759,16 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);
if (nr) {
- if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL ||
+ if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
intel_pt_get_guest(ptq))
return -EINVAL;
machine = ptq->guest_machine;
- thread = ptq->unknown_guest_thread;
+ thread = ptq->guest_thread;
+ if (!thread) {
+ if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL)
+ return -EINVAL;
+ thread = ptq->unknown_guest_thread;
+ }
} else {
thread = ptq->thread;
if (!thread) {
@@ -764,6 +799,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
*ip += e->byte_cnt;
intel_pt_insn->op = e->op;
intel_pt_insn->branch = e->branch;
+ intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite;
intel_pt_insn->length = e->length;
intel_pt_insn->rel = e->rel;
memcpy(intel_pt_insn->buf, e->insn,
@@ -795,8 +831,18 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
insn_cnt += 1;
- if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
+ if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) {
+ bool eptw;
+ u64 offs;
+
+ if (!intel_pt_jmp_16(intel_pt_insn))
+ goto out;
+ /* Check for emulated ptwrite */
+ offs = offset + intel_pt_insn->length;
+ eptw = intel_pt_emulated_ptwrite(al.map->dso, machine, offs);
+ intel_pt_insn->emulated_ptwrite = eptw;
goto out;
+ }
if (max_insn_cnt && insn_cnt >= max_insn_cnt)
goto out_no_cache;
@@ -1300,6 +1346,7 @@ static void intel_pt_free_queue(void *priv)
if (!ptq)
return;
thread__zput(ptq->thread);
+ thread__zput(ptq->guest_thread);
thread__zput(ptq->unknown_guest_thread);
intel_pt_decoder_free(ptq->decoder);
zfree(&ptq->event_buf);
@@ -2372,6 +2419,10 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
}
+ /* Ensure guest code maps are set up */
+ if (symbol_conf.guest_code && (state->from_nr || state->to_nr))
+ intel_pt_get_guest(ptq);
+
/*
* Do PEBS first to allow for the possibility that the PEBS timestamp
* precedes the current timestamp.
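Putting the emulated-ptwrite pieces together: intel_pt_jmp_16() keys on an unconditional branch with rel == 16, intel_pt_emulated_ptwrite() then checks the 16 bytes after the jump for "\x0f\x0bperf,ptwrite " plus its NUL, and intel_pt_emulated_ptwrite() in the decoder collects the 64-bit payload from subsequent TNT (taken/not-taken) bits. An inferred sketch of an instrumentation site, not the canonical documented sequence:

    /* A 2-byte jmp over UD2 (0f 0b) + "perf,ptwrite " + NUL == 16 bytes,  */
    /* giving the rel == 16 signature; the payload is then emitted as 64   */
    /* conditional-branch outcomes that the decoder reads back as TNT bits. */
    __asm__ volatile ("jmp 1f\n"
                      "\t.byte 0x0f, 0x0b\n"
                      "\t.ascii \"perf,ptwrite \"\n"
                      "\t.byte 0\n"
                      "1:\n");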
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
index 15f60fd09424..014d82159656 100644
--- a/tools/perf/util/libunwind/arm64.c
+++ b/tools/perf/util/libunwind/arm64.c
@@ -24,7 +24,7 @@
#include "unwind.h"
#include "libunwind-aarch64.h"
#define perf_event_arm_regs perf_event_arm64_regs
-#include <../../../../arch/arm64/include/uapi/asm/perf_regs.h>
+#include <../../../arch/arm64/include/uapi/asm/perf_regs.h>
#undef perf_event_arm_regs
#include "../../arch/arm64/util/unwind-libunwind.c"
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 95391236f5f6..009061852808 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -84,6 +84,14 @@ static int machine__set_mmap_name(struct machine *machine)
return machine->mmap_name ? 0 : -ENOMEM;
}
+static void thread__set_guest_comm(struct thread *thread, pid_t pid)
+{
+ char comm[64];
+
+ snprintf(comm, sizeof(comm), "[guest/%d]", pid);
+ thread__set_comm(thread, comm, 0);
+}
+
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
int err = -ENOMEM;
@@ -119,13 +127,11 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
if (pid != HOST_KERNEL_ID) {
struct thread *thread = machine__findnew_thread(machine, -1,
pid);
- char comm[64];
if (thread == NULL)
goto out;
- snprintf(comm, sizeof(comm), "[guest/%d]", pid);
- thread__set_comm(thread, comm, 0);
+ thread__set_guest_comm(thread, pid);
thread__put(thread);
}
@@ -299,6 +305,8 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
rb_link_node(&machine->rb_node, parent, p);
rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
+ machine->machines = machines;
+
return machine;
}
@@ -384,6 +392,93 @@ struct machine *machines__find_guest(struct machines *machines, pid_t pid)
return machine;
}
+/*
+ * A common case for KVM test programs is that the test program acts as the
+ * hypervisor, creating, running and destroying the virtual machine, and
+ * providing the guest object code from its own object code. In this case,
+ * the VM is not running an OS, but only the functions loaded into it by the
+ * hypervisor test program, and conveniently, loaded at the same virtual
+ * addresses.
+ *
+ * Normally to resolve addresses, MMAP events are needed to map addresses
+ * back to the object code and debug symbols for that object code.
+ *
+ * Currently, there is no way to get such mapping information from guests
+ * but, in the scenario described above, the guest has the same mappings
+ * as the hypervisor, so support for that scenario can be achieved.
+ *
+ * To support that, copy the host thread's maps to the guest thread's maps.
+ * Note, we do not discover the guest until we encounter a guest event,
+ * which works well because it is not until then that we know that the host
+ * thread's maps have been set up.
+ *
+ * This function returns the guest thread. Apart from keeping the data
+ * structures sane, using a thread belonging to the guest machine, instead
+ * of the host thread, allows it to have its own comm (see
+ * thread__set_guest_comm()).
+ */
+static struct thread *findnew_guest_code(struct machine *machine,
+ struct machine *host_machine,
+ pid_t pid)
+{
+ struct thread *host_thread;
+ struct thread *thread;
+ int err;
+
+ if (!machine)
+ return NULL;
+
+ thread = machine__findnew_thread(machine, -1, pid);
+ if (!thread)
+ return NULL;
+
+ /* Assume maps are set up if there are any */
+ if (thread->maps->nr_maps)
+ return thread;
+
+ host_thread = machine__find_thread(host_machine, -1, pid);
+ if (!host_thread)
+ goto out_err;
+
+ thread__set_guest_comm(thread, pid);
+
+ /*
+ * Guest code can be found in hypervisor process at the same address
+ * so copy host maps.
+ */
+ err = maps__clone(thread, host_thread->maps);
+ thread__put(host_thread);
+ if (err)
+ goto out_err;
+
+ return thread;
+
+out_err:
+ thread__zput(thread);
+ return NULL;
+}
+
+struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
+{
+ struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
+ struct machine *machine = machines__findnew(machines, pid);
+
+ return findnew_guest_code(machine, host_machine, pid);
+}
+
+struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
+{
+ struct machines *machines = machine->machines;
+ struct machine *host_machine;
+
+ if (!machines)
+ return NULL;
+
+ host_machine = machines__find(machines, HOST_KERNEL_ID);
+
+ return findnew_guest_code(machine, host_machine, pid);
+}
+
void machines__process_guests(struct machines *machines,
machine__process_t process, void *data)
{
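An illustrative workflow for the guest-code support wired up above (the --guest-code option name is assumed from symbol_conf.guest_code; the test binary is hypothetical):

    # trace a KVM test program that loads its own code into the guest
    perf record --kcore -e intel_pt// -- ./kvm-selftest
    # resolve guest addresses against the hypervisor's own maps
    perf script --guest-code --itrace=bep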
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 0023165422aa..5d7daf7cb7bc 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,7 @@ struct symbol;
struct target;
struct thread;
union perf_event;
+struct machines;
/* Native host kernel uses -1 as pid index in machine */
#define HOST_KERNEL_ID (-1)
@@ -59,6 +60,7 @@ struct machine {
void *priv;
u64 db_id;
};
+ struct machines *machines;
bool trampolines_mapped;
};
@@ -162,10 +164,11 @@ void machines__process_guests(struct machines *machines,
struct machine *machines__add(struct machines *machines, pid_t pid,
const char *root_dir);
-struct machine *machines__find_host(struct machines *machines);
struct machine *machines__find(struct machines *machines, pid_t pid);
struct machine *machines__findnew(struct machines *machines, pid_t pid);
struct machine *machines__find_guest(struct machines *machines, pid_t pid);
+struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid);
+struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid);
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
void machines__set_comm_exec(struct machines *machines, bool comm_exec);
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index ed0ab838bcc5..c3c21a9c350b 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -314,6 +314,30 @@ static const char * const mem_hops[] = {
"board",
};
+static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+{
+ u64 op = PERF_MEM_LOCK_NA;
+ int l;
+
+ if (mem_info)
+ op = mem_info->data_src.mem_op;
+
+ if (op & PERF_MEM_OP_NA)
+ l = scnprintf(out, sz, "N/A");
+ else if (op & PERF_MEM_OP_LOAD)
+ l = scnprintf(out, sz, "LOAD");
+ else if (op & PERF_MEM_OP_STORE)
+ l = scnprintf(out, sz, "STORE");
+ else if (op & PERF_MEM_OP_PFETCH)
+ l = scnprintf(out, sz, "PFETCH");
+ else if (op & PERF_MEM_OP_EXEC)
+ l = scnprintf(out, sz, "EXEC");
+ else
+ l = scnprintf(out, sz, "No");
+
+ return l;
+}
+
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
{
size_t i, l = 0;
@@ -466,7 +490,10 @@ int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_in
{
int i = 0;
- i += perf_mem__lvl_scnprintf(out, sz, mem_info);
+ i += scnprintf(out, sz, "|OP ");
+ i += perf_mem__op_scnprintf(out + i, sz - i, mem_info);
+ i += scnprintf(out + i, sz - i, "|LVL ");
+ i += perf_mem__lvl_scnprintf(out + i, sz, mem_info);
i += scnprintf(out + i, sz - i, "|SNP ");
i += perf_mem__snp_scnprintf(out + i, sz - i, mem_info);
i += scnprintf(out + i, sz - i, "|TLB ");
@@ -582,6 +609,8 @@ do { \
}
if (lvl & P(LVL, MISS))
if (lvl & P(LVL, L1)) stats->st_l1miss++;
+ if (lvl & P(LVL, NA))
+ stats->st_na++;
} else {
/* unparsable data_src? */
stats->noparse++;
@@ -608,6 +637,7 @@ void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
stats->st_noadrs += add->st_noadrs;
stats->st_l1hit += add->st_l1hit;
stats->st_l1miss += add->st_l1miss;
+ stats->st_na += add->st_na;
stats->load += add->load;
stats->ld_excl += add->ld_excl;
stats->ld_shared += add->ld_shared;
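With the OP field prepended, a perf_script__meminfo_scnprintf() line now leads with the memory operation; an illustrative shape of the output (field contents elided, not captured from a run):

    |OP LOAD|LVL <levels>|SNP <snoop>|TLB <tlb access>|...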
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 916242f8020a..8a8b568baeee 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -63,6 +63,7 @@ struct c2c_stats {
u32 st_noadrs; /* cacheable store with no address */
u32 st_l1hit; /* count of stores that hit L1D */
u32 st_l1miss; /* count of stores that miss L1D */
+ u32 st_na; /* count of stores whose memory level is not available */
u32 load; /* count of all loads in trace */
u32 ld_excl; /* exclusive loads, rmt/lcl DRAM - snp none/miss */
u32 ld_shared; /* shared loads, rmt/lcl DRAM - snp hit */
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index d8492e339521..8f7baeabc5cf 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -728,22 +728,23 @@ static int metricgroup__build_event_string(struct strbuf *events,
{
struct hashmap_entry *cur;
size_t bkt;
- bool no_group = true, has_duration = false;
+ bool no_group = true, has_tool_events = false;
+ bool tool_events[PERF_TOOL_MAX] = {false};
int ret = 0;
#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
hashmap__for_each_entry(ctx->ids, cur, bkt) {
const char *sep, *rsep, *id = cur->key;
+ enum perf_tool_event ev;
pr_debug("found event %s\n", id);
- /*
- * Duration time maps to a software event and can make
- * groups not count. Always use it outside a
- * group.
- */
- if (!strcmp(id, "duration_time")) {
- has_duration = true;
+
+ /* Always move tool events outside of the group. */
+ ev = perf_tool_event__from_str(id);
+ if (ev != PERF_TOOL_NONE) {
+ has_tool_events = true;
+ tool_events[ev] = true;
continue;
}
/* Separate events with commas and open the group if necessary. */
@@ -802,16 +803,25 @@ static int metricgroup__build_event_string(struct strbuf *events,
RETURN_IF_NON_ZERO(ret);
}
}
- if (has_duration) {
- if (no_group) {
- /* Strange case of a metric of just duration_time. */
- ret = strbuf_addf(events, "duration_time");
- } else if (!has_constraint)
- ret = strbuf_addf(events, "}:W,duration_time");
- else
- ret = strbuf_addf(events, ",duration_time");
- } else if (!no_group && !has_constraint)
+ if (!no_group && !has_constraint) {
ret = strbuf_addf(events, "}:W");
+ RETURN_IF_NON_ZERO(ret);
+ }
+ if (has_tool_events) {
+ int i;
+
+ perf_tool_event__for_each_event(i) {
+ if (tool_events[i]) {
+ if (!no_group) {
+ ret = strbuf_addch(events, ',');
+ RETURN_IF_NON_ZERO(ret);
+ }
+ no_group = false;
+ ret = strbuf_addstr(events, perf_tool_event__to_str(i));
+ RETURN_IF_NON_ZERO(ret);
+ }
+ }
+ }
return ret;
#undef RETURN_IF_NON_ZERO
@@ -1117,7 +1127,7 @@ out:
/**
* metric_list_cmp - list_sort comparator that sorts metrics with more events to
- * the front. duration_time is excluded from the count.
+ * the front. Tool events are excluded from the count.
*/
static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
const struct list_head *r)
@@ -1125,15 +1135,19 @@ static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
const struct metric *left = container_of(l, struct metric, nd);
const struct metric *right = container_of(r, struct metric, nd);
struct expr_id_data *data;
- int left_count, right_count;
+ int i, left_count, right_count;
left_count = hashmap__size(left->pctx->ids);
- if (!expr__get_id(left->pctx, "duration_time", &data))
- left_count--;
+ perf_tool_event__for_each_event(i) {
+ if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
+ left_count--;
+ }
right_count = hashmap__size(right->pctx->ids);
- if (!expr__get_id(right->pctx, "duration_time", &data))
- right_count--;
+ perf_tool_event__for_each_event(i) {
+ if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
+ right_count--;
+ }
return right_count - left_count;
}
@@ -1270,6 +1284,30 @@ static void metricgroup__free_metrics(struct list_head *metric_list)
}
/**
+ * find_tool_events - Search for the presence of tool events in metric_list.
+ * @metric_list: List to take metrics from.
+ * @tool_events: Array initialized to all false; the entry for each tool event
+ * found in @metric_list is set to true.
+ */
+static void find_tool_events(const struct list_head *metric_list,
+ bool tool_events[PERF_TOOL_MAX])
+{
+ struct metric *m;
+
+ list_for_each_entry(m, metric_list, nd) {
+ int i;
+
+ perf_tool_event__for_each_event(i) {
+ struct expr_id_data *data;
+
+ if (!tool_events[i] &&
+ !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
+ tool_events[i] = true;
+ }
+ }
+}
+
+/**
* build_combined_expr_ctx - Make an expr_parse_ctx with all has_constraint
* metric IDs, as the IDs are held in a set,
* duplicates will be removed.
@@ -1318,11 +1356,14 @@ err_out:
* @ids: the event identifiers parsed from a metric.
* @modifier: any modifiers added to the events.
* @has_constraint: false if events should be placed in a weak group.
+ * @tool_events: entries are set true if the tool event at that index may be
+ * present in the overall list of metrics.
* @out_evlist: the created list of events.
*/
static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
struct expr_parse_ctx *ids, const char *modifier,
- bool has_constraint, struct evlist **out_evlist)
+ bool has_constraint, const bool tool_events[PERF_TOOL_MAX],
+ struct evlist **out_evlist)
{
struct parse_events_error parse_error;
struct evlist *parsed_evlist;
@@ -1331,26 +1372,38 @@ static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
*out_evlist = NULL;
if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
- char *tmp;
+ bool added_event = false;
+ int i;
/*
- * We may fail to share events between metrics because
- * duration_time isn't present in one metric. For example, a
- * ratio of cache misses doesn't need duration_time but the same
- * events may be used for a misses per second. Events without
- * sharing implies multiplexing, that is best avoided, so place
- * duration_time in every group.
+ * We may fail to share events between metrics because a tool
+ * event isn't present in one metric. For example, a ratio of
+ * cache misses doesn't need duration_time but the same events
+ * may be used for a misses-per-second metric. Events without
+ * sharing imply multiplexing, which is best avoided, so place
+ * all tool events in every group.
*
* Also, there may be no ids/events in the expression parsing
* context because of constant evaluation, e.g.:
* event1 if #smt_on else 0
- * Add a duration_time event to avoid a parse error on an empty
- * string.
+ * Add a tool event to avoid a parse error on an empty string.
*/
- tmp = strdup("duration_time");
- if (!tmp)
- return -ENOMEM;
+ perf_tool_event__for_each_event(i) {
+ if (tool_events[i]) {
+ char *tmp = strdup(perf_tool_event__to_str(i));
+
+ if (!tmp)
+ return -ENOMEM;
+ ids__insert(ids->ids, tmp);
+ added_event = true;
+ }
+ }
- ids__insert(ids->ids, tmp);
+ if (!added_event && hashmap__size(ids->ids) == 0) {
+ char *tmp = strdup("duration_time");
+
+ if (!tmp)
+ return -ENOMEM;
+ ids__insert(ids->ids, tmp);
+ }
}
ret = metricgroup__build_event_string(&events, ids, modifier,
has_constraint);
@@ -1392,6 +1445,7 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
struct evlist *combined_evlist = NULL;
LIST_HEAD(metric_list);
struct metric *m;
+ bool tool_events[PERF_TOOL_MAX] = {false};
int ret;
if (metric_events_list->nr_entries == 0)
@@ -1407,12 +1461,15 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
if (!metric_no_merge) {
struct expr_parse_ctx *combined = NULL;
+ find_tool_events(&metric_list, tool_events);
+
ret = build_combined_expr_ctx(&metric_list, &combined);
if (!ret && combined && hashmap__size(combined->ids)) {
ret = parse_ids(metric_no_merge, fake_pmu, combined,
/*modifier=*/NULL,
/*has_constraint=*/true,
+ tool_events,
&combined_evlist);
}
if (combined)
@@ -1460,7 +1517,7 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
}
if (!metric_evlist) {
ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
- m->has_constraint, &m->evlist);
+ m->has_constraint, tool_events, &m->evlist);
if (ret)
goto out;
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 50502b4a7ca4..a4dff881be39 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -62,8 +62,8 @@ void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_u
void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
struct evlist *evlist __maybe_unused,
- int idx __maybe_unused,
- bool per_cpu __maybe_unused)
+ struct evsel *evsel __maybe_unused,
+ int idx __maybe_unused)
{
}
diff --git a/tools/perf/util/off_cpu.h b/tools/perf/util/off_cpu.h
new file mode 100644
index 000000000000..2dd67c60f211
--- /dev/null
+++ b/tools/perf/util/off_cpu.h
@@ -0,0 +1,38 @@
+#ifndef PERF_UTIL_OFF_CPU_H
+#define PERF_UTIL_OFF_CPU_H
+
+#include <linux/perf_event.h>
+
+struct evlist;
+struct target;
+struct perf_session;
+struct record_opts;
+
+#define OFFCPU_EVENT "offcpu-time"
+
+#define OFFCPU_SAMPLE_TYPES (PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | \
+ PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
+ PERF_SAMPLE_ID | PERF_SAMPLE_CPU | \
+ PERF_SAMPLE_PERIOD | PERF_SAMPLE_CALLCHAIN | \
+ PERF_SAMPLE_CGROUP)
+
+
+#ifdef HAVE_BPF_SKEL
+int off_cpu_prepare(struct evlist *evlist, struct target *target,
+ struct record_opts *opts);
+int off_cpu_write(struct perf_session *session);
+#else
+static inline int off_cpu_prepare(struct evlist *evlist __maybe_unused,
+ struct target *target __maybe_unused,
+ struct record_opts *opts __maybe_unused)
+{
+ return -1;
+}
+
+static inline int off_cpu_write(struct perf_session *session __maybe_unused)
+{
+ return -1;
+}
+#endif
+
+#endif /* PERF_UTIL_OFF_CPU_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index dd84fed698a3..7ed235740431 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -154,6 +154,21 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
},
};
+struct event_symbol event_symbols_tool[PERF_TOOL_MAX] = {
+ [PERF_TOOL_DURATION_TIME] = {
+ .symbol = "duration_time",
+ .alias = "",
+ },
+ [PERF_TOOL_USER_TIME] = {
+ .symbol = "user_time",
+ .alias = "",
+ },
+ [PERF_TOOL_SYSTEM_TIME] = {
+ .symbol = "system_time",
+ .alias = "",
+ },
+};
+
#define __PERF_EVENT_FIELD(config, name) \
((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
@@ -350,7 +365,7 @@ __add_event(struct list_head *list, int *idx,
(*idx)++;
evsel->core.cpus = cpus;
evsel->core.own_cpus = perf_cpu_map__get(cpus);
- evsel->core.system_wide = pmu ? pmu->is_uncore : false;
+ evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
evsel->auto_merge_stats = auto_merge_stats;
if (name)
@@ -402,14 +417,16 @@ static int add_event_tool(struct list_head *list, int *idx,
if (!evsel)
return -ENOMEM;
evsel->tool_event = tool_event;
- if (tool_event == PERF_TOOL_DURATION_TIME) {
+ if (tool_event == PERF_TOOL_DURATION_TIME
+ || tool_event == PERF_TOOL_USER_TIME
+ || tool_event == PERF_TOOL_SYSTEM_TIME) {
free((char *)evsel->unit);
evsel->unit = strdup("ns");
}
return 0;
}
-static int parse_aliases(char *str, const char *names[][EVSEL__MAX_ALIASES], int size)
+static int parse_aliases(char *str, const char *const names[][EVSEL__MAX_ALIASES], int size)
{
int i, j;
int n, longest = -1;
@@ -3056,21 +3073,34 @@ out_enomem:
return evt_num;
}
-static void print_tool_event(const char *name, const char *event_glob,
+static void print_tool_event(const struct event_symbol *syms, const char *event_glob,
bool name_only)
{
- if (event_glob && !strglobmatch(name, event_glob))
+ if (syms->symbol == NULL)
+ return;
+
+ if (event_glob && !(strglobmatch(syms->symbol, event_glob) ||
+ (syms->alias && strglobmatch(syms->alias, event_glob))))
return;
+
if (name_only)
- printf("%s ", name);
- else
+ printf("%s ", syms->symbol);
+ else {
+ char name[MAX_NAME_LEN];
+ if (syms->alias && strlen(syms->alias))
+ snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
+ else
+ strlcpy(name, syms->symbol, MAX_NAME_LEN);
printf(" %-50s [%s]\n", name, "Tool event");
-
+ }
}
void print_tool_events(const char *event_glob, bool name_only)
{
- print_tool_event("duration_time", event_glob, name_only);
+ // Start at 1 because enum entry 0, PERF_TOOL_NONE, is not a real tool event
+ for (int i = 1; i < PERF_TOOL_MAX; ++i) {
+ print_tool_event(event_symbols_tool + i, event_glob, name_only);
+ }
if (pager_in_use())
printf("\n");
}
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 5b6e4b5249cf..3a9ce96c8bce 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -353,6 +353,8 @@ alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_AL
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
duration_time { return tool(yyscanner, PERF_TOOL_DURATION_TIME); }
+user_time { return tool(yyscanner, PERF_TOOL_USER_TIME); }
+system_time { return tool(yyscanner, PERF_TOOL_SYSTEM_TIME); }
bpf-output { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_BPF_OUTPUT); }
cgroup-switches { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CGROUP_SWITCHES); }
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index caed0336429f..ce80b79be103 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -86,9 +86,21 @@ bool is_directory(const char *base_path, const struct dirent *dent)
char path[PATH_MAX];
struct stat st;
- sprintf(path, "%s/%s", base_path, dent->d_name);
+ snprintf(path, sizeof(path), "%s/%s", base_path, dent->d_name);
if (stat(path, &st))
return false;
return S_ISDIR(st.st_mode);
}
+
+bool is_executable_file(const char *base_path, const struct dirent *dent)
+{
+ char path[PATH_MAX];
+ struct stat st;
+
+ snprintf(path, sizeof(path), "%s/%s", base_path, dent->d_name);
+ if (stat(path, &st))
+ return false;
+
+ return !S_ISDIR(st.st_mode) && (st.st_mode & S_IXUSR);
+}
diff --git a/tools/perf/util/path.h b/tools/perf/util/path.h
index 083429b7efa3..d94902c22222 100644
--- a/tools/perf/util/path.h
+++ b/tools/perf/util/path.h
@@ -12,5 +12,6 @@ int path__join3(char *bf, size_t size, const char *path1, const char *path2, con
bool is_regular_file(const char *file);
bool is_directory(const char *base_path, const struct dirent *dent);
+bool is_executable_file(const char *base_path, const struct dirent *dent);
#endif /* _PERF_PATH_H */
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index a982e40ee5a9..872dd3d38782 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -103,6 +103,8 @@ static const char *__perf_reg_name_arm64(int id)
return "lr";
case PERF_REG_ARM64_PC:
return "pc";
+ case PERF_REG_ARM64_VG:
+ return "vg";
default:
return NULL;
}
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index a685d20165f7..aa5156c2bcff 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -38,5 +38,6 @@ util/units.c
util/affinity.c
util/rwsem.c
util/hashmap.c
+util/perf_regs.c
util/pmu-hybrid.c
util/fncache.c
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 413f2d19c13f..adba01b7d9dd 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -392,6 +392,18 @@ static const char *get_dsoname(struct map *map)
return dsoname;
}
+static unsigned long get_offset(struct symbol *sym, struct addr_location *al)
+{
+ unsigned long offset;
+
+ if (al->addr < sym->end)
+ offset = al->addr - sym->start;
+ else
+ offset = al->addr - al->map->start - sym->start;
+
+ return offset;
+}
+
static PyObject *python_process_callchain(struct perf_sample *sample,
struct evsel *evsel,
struct addr_location *al)
@@ -443,6 +455,25 @@ static PyObject *python_process_callchain(struct perf_sample *sample,
_PyUnicode_FromStringAndSize(node->ms.sym->name,
node->ms.sym->namelen));
pydict_set_item_string_decref(pyelem, "sym", pysym);
+
+ if (node->ms.map) {
+ struct map *map = node->ms.map;
+ struct addr_location node_al;
+ unsigned long offset;
+
+ node_al.addr = map->map_ip(map, node->ip);
+ node_al.map = map;
+ offset = get_offset(node->ms.sym, &node_al);
+
+ pydict_set_item_string_decref(
+ pyelem, "sym_off",
+ PyLong_FromUnsignedLongLong(offset));
+ }
+ if (node->srcline && strcmp(":0", node->srcline)) {
+ pydict_set_item_string_decref(
+ pyelem, "sym_srcline",
+ _PyUnicode_FromString(node->srcline));
+ }
}
if (node->ms.map) {
@@ -520,18 +551,6 @@ exit:
return pylist;
}
-static unsigned long get_offset(struct symbol *sym, struct addr_location *al)
-{
- unsigned long offset;
-
- if (al->addr < sym->end)
- offset = al->addr - sym->start;
- else
- offset = al->addr - al->map->start - sym->start;
-
- return offset;
-}
-
static int get_symoff(struct symbol *sym, struct addr_location *al,
bool print_off, char *bf, int size)
{
@@ -736,12 +755,22 @@ static void set_regs_in_dict(PyObject *dict,
}
static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
- const char *dso_field, const char *sym_field,
- const char *symoff_field)
+ const char *dso_field, const char *dso_bid_field,
+ const char *dso_map_start, const char *dso_map_end,
+ const char *sym_field, const char *symoff_field)
{
+ char sbuild_id[SBUILD_ID_SIZE];
+
if (al->map) {
pydict_set_item_string_decref(dict, dso_field,
_PyUnicode_FromString(al->map->dso->name));
+ build_id__sprintf(&al->map->dso->bid, sbuild_id);
+ pydict_set_item_string_decref(dict, dso_bid_field,
+ _PyUnicode_FromString(sbuild_id));
+ pydict_set_item_string_decref(dict, dso_map_start,
+ PyLong_FromUnsignedLong(al->map->start));
+ pydict_set_item_string_decref(dict, dso_map_end,
+ PyLong_FromUnsignedLong(al->map->end));
}
if (al->sym) {
pydict_set_item_string_decref(dict, sym_field,
@@ -821,7 +850,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
(const char *)sample->raw_data, sample->raw_size));
pydict_set_item_string_decref(dict, "comm",
_PyUnicode_FromString(thread__comm_str(al->thread)));
- set_sym_in_dict(dict, al, "dso", "symbol", "symoff");
+ set_sym_in_dict(dict, al, "dso", "dso_bid", "dso_map_start", "dso_map_end",
+ "symbol", "symoff");
pydict_set_item_string_decref(dict, "callchain", callchain);
@@ -837,7 +867,9 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
if (addr_al) {
pydict_set_item_string_decref(dict_sample, "addr_correlates_sym",
PyBool_FromLong(1));
- set_sym_in_dict(dict_sample, addr_al, "addr_dso", "addr_symbol", "addr_symoff");
+ set_sym_in_dict(dict_sample, addr_al, "addr_dso", "addr_dso_bid",
+ "addr_dso_map_start", "addr_dso_map_end",
+ "addr_symbol", "addr_symoff");
}
if (sample->flags)
@@ -2074,7 +2106,11 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
fprintf(ofp, "\t\tfor node in common_callchain:");
fprintf(ofp, "\n\t\t\tif 'sym' in node:");
- fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))");
+ fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x] %%s%%s%%s%%s\" %% (");
+ fprintf(ofp, "\n\t\t\t\t\tnode['ip'], node['sym']['name'],");
+ fprintf(ofp, "\n\t\t\t\t\t\"+0x{:x}\".format(node['sym_off']) if 'sym_off' in node else \"\",");
+ fprintf(ofp, "\n\t\t\t\t\t\" ({})\".format(node['dso']) if 'dso' in node else \"\",");
+ fprintf(ofp, "\n\t\t\t\t\t\" \" + node['sym_srcline'] if 'sym_srcline' in node else \"\"))");
fprintf(ofp, "\n\t\t\telse:");
fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n");
fprintf(ofp, "\t\tprint()\n\n");
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a7f93f5a1ac8..0aa818977d2b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1426,6 +1426,13 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
else
pid = sample->pid;
+ /*
+ * Guest code machine is created as needed and does not use
+ * DEFAULT_GUEST_KERNEL_ID.
+ */
+ if (symbol_conf.guest_code)
+ return machines__findnew(machines, pid);
+
return machines__find_guest(machines, pid);
}
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 138e3ab9d638..606f09b09226 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -610,6 +610,19 @@ static bool hybrid_uniquify(struct evsel *evsel)
return perf_pmu__has_hybrid() && !is_uncore(evsel);
}
+static bool hybrid_merge(struct evsel *counter, struct perf_stat_config *config,
+ bool check)
+{
+ if (hybrid_uniquify(counter)) {
+ if (check)
+ return config && config->hybrid_merge;
+ else
+ return config && !config->hybrid_merge;
+ }
+
+ return false;
+}
+
static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
void (*cb)(struct perf_stat_config *config, struct evsel *counter, void *data,
bool first),
@@ -618,9 +631,9 @@ static bool collect_data(struct perf_stat_config *config, struct evsel *counter,
if (counter->merged_stat)
return false;
cb(config, counter, data, true);
- if (config->no_merge || hybrid_uniquify(counter))
+ if (config->no_merge || hybrid_merge(counter, config, false))
uniquify_event_name(counter);
- else if (counter->auto_merge_stats)
+ else if (counter->auto_merge_stats || hybrid_merge(counter, config, true))
collect_all_aliases(config, counter, cb, data);
return true;
}
@@ -751,11 +764,11 @@ static int cmp_val(const void *a, const void *b)
static struct perf_aggr_thread_value *sort_aggr_thread(
struct evsel *counter,
- int nthreads, int ncpus,
int *ret,
struct target *_target)
{
- int cpu, thread, i = 0;
+ int nthreads = perf_thread_map__nr(counter->core.threads);
+ int i = 0;
double uval;
struct perf_aggr_thread_value *buf;
@@ -763,13 +776,17 @@ static struct perf_aggr_thread_value *sort_aggr_thread(
if (!buf)
return NULL;
- for (thread = 0; thread < nthreads; thread++) {
+ for (int thread = 0; thread < nthreads; thread++) {
+ int idx;
u64 ena = 0, run = 0, val = 0;
- for (cpu = 0; cpu < ncpus; cpu++) {
- val += perf_counts(counter->counts, cpu, thread)->val;
- ena += perf_counts(counter->counts, cpu, thread)->ena;
- run += perf_counts(counter->counts, cpu, thread)->run;
+ perf_cpu_map__for_each_idx(idx, evsel__cpus(counter)) {
+ struct perf_counts_values *counts =
+ perf_counts(counter->counts, idx, thread);
+
+ val += counts->val;
+ ena += counts->ena;
+ run += counts->run;
}
uval = val * counter->scale;
@@ -804,13 +821,11 @@ static void print_aggr_thread(struct perf_stat_config *config,
struct evsel *counter, char *prefix)
{
FILE *output = config->output;
- int nthreads = perf_thread_map__nr(counter->core.threads);
- int ncpus = perf_cpu_map__nr(counter->core.cpus);
int thread, sorted_threads;
struct aggr_cpu_id id;
struct perf_aggr_thread_value *buf;
- buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads, _target);
+ buf = sort_aggr_thread(counter, &sorted_threads, _target);
if (!buf) {
perror("cannot sort aggr thread");
return;
@@ -933,8 +948,6 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
struct evsel *counter;
bool first = true;
- if (prefix)
- fputs(prefix, config->output);
evlist__for_each_entry(evlist, counter) {
u64 ena, run, val;
double uval;
@@ -946,6 +959,8 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
id = aggr_cpu_id__cpu(cpu, /*data=*/NULL);
if (first) {
+ if (prefix)
+ fputs(prefix, config->output);
aggr_printout(config, counter, id, 0);
first = false;
}
@@ -957,7 +972,8 @@ static void print_no_aggr_metric(struct perf_stat_config *config,
printout(config, id, 0, counter, uval, prefix,
run, ena, 1.0, &rt_stat);
}
- fputc('\n', config->output);
+ if (!first)
+ fputc('\n', config->output);
}
}
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 10af7804e482..979c8cb918f7 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -26,6 +26,7 @@
struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
+struct rusage_stats ru_stats;
struct saved_value {
struct rb_node rb_node;
@@ -199,6 +200,7 @@ void perf_stat__reset_shadow_stats(void)
{
reset_stat(&rt_stat);
memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
+ memset(&ru_stats, 0, sizeof(ru_stats));
}
void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
@@ -831,10 +833,31 @@ static int prepare_metric(struct evsel **metric_events,
u64 metric_total = 0;
int source_count;
- if (!strcmp(metric_events[i]->name, "duration_time")) {
- stats = &walltime_nsecs_stats;
- scale = 1e-9;
+ if (evsel__is_tool(metric_events[i])) {
source_count = 1;
+ switch (metric_events[i]->tool_event) {
+ case PERF_TOOL_DURATION_TIME:
+ stats = &walltime_nsecs_stats;
+ scale = 1e-9;
+ break;
+ case PERF_TOOL_USER_TIME:
+ stats = &ru_stats.ru_utime_usec_stat;
+ scale = 1e-6;
+ break;
+ case PERF_TOOL_SYSTEM_TIME:
+ stats = &ru_stats.ru_stime_usec_stat;
+ scale = 1e-6;
+ break;
+ case PERF_TOOL_NONE:
+ pr_err("Invalid tool event 'none'");
+ abort();
+ case PERF_TOOL_MAX:
+ pr_err("Invalid tool event 'max'");
+ abort();
+ default:
+ pr_err("Unknown tool event '%s'", evsel__name(metric_events[i]));
+ abort();
+ }
} else {
v = saved_value_lookup(metric_events[i], cpu_map_idx, false,
STAT_NONE, 0, st,
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index c1af37e11f98..37ea2d044708 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -117,7 +117,9 @@ static void perf_stat_evsel_id_init(struct evsel *evsel)
/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
- if (!strcmp(evsel__name(evsel), id_str[i])) {
+ if (!strcmp(evsel__name(evsel), id_str[i]) ||
+ (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
+ && strstr(evsel__name(evsel), evsel->pmu_name))) {
ps->id = i;
break;
}
@@ -235,14 +237,12 @@ void evlist__reset_prev_raw_counts(struct evlist *evlist)
static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
- int ncpus = evsel__nr_cpus(evsel);
- int nthreads = perf_thread_map__nr(evsel->core.threads);
+ int idx, nthreads = perf_thread_map__nr(evsel->core.threads);
for (int thread = 0; thread < nthreads; thread++) {
- for (int cpu = 0; cpu < ncpus; cpu++) {
- *perf_counts(evsel->counts, cpu, thread) =
- *perf_counts(evsel->prev_raw_counts, cpu,
- thread);
+ perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
+ *perf_counts(evsel->counts, idx, thread) =
+ *perf_counts(evsel->prev_raw_counts, idx, thread);
}
}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 335d19cc3063..b5aeb8e6d34b 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -108,6 +108,11 @@ struct runtime_stat {
struct rblist value_list;
};
+struct rusage_stats {
+ struct stats ru_utime_usec_stat;
+ struct stats ru_stime_usec_stat;
+};
+
typedef struct aggr_cpu_id (*aggr_get_id_t)(struct perf_stat_config *config, struct perf_cpu cpu);
struct perf_stat_config {
@@ -122,6 +127,7 @@ struct perf_stat_config {
bool ru_display;
bool big_num;
bool no_merge;
+ bool hybrid_merge;
bool walltime_run_table;
bool all_kernel;
bool all_user;
@@ -148,6 +154,7 @@ struct perf_stat_config {
const char *csv_sep;
struct stats *walltime_nsecs_stats;
struct rusage ru_data;
+ struct rusage_stats *ru_stats;
struct cpu_aggr_map *aggr_map;
aggr_get_id_t aggr_get_id;
struct cpu_aggr_map *cpus_aggr_map;
@@ -177,6 +184,20 @@ static inline void init_stats(struct stats *stats)
stats->max = 0;
}
+static inline void init_rusage_stats(struct rusage_stats *ru_stats)
+{
+ init_stats(&ru_stats->ru_utime_usec_stat);
+ init_stats(&ru_stats->ru_stime_usec_stat);
+}
+
+static inline void update_rusage_stats(struct rusage_stats *ru_stats, struct rusage *rusage)
+{
+ const u64 us_to_ns = 1000;
+ const u64 s_to_ns = 1000000000;
+
+ update_stats(&ru_stats->ru_utime_usec_stat,
+ (rusage->ru_utime.tv_usec * us_to_ns + rusage->ru_utime.tv_sec * s_to_ns));
+ update_stats(&ru_stats->ru_stime_usec_stat,
+ (rusage->ru_stime.tv_usec * us_to_ns + rusage->ru_stime.tv_sec * s_to_ns));
+}
+
struct evsel;
struct evlist;
@@ -196,6 +217,7 @@ bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id);
extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;
+extern struct rusage_stats ru_stats;
typedef void (*print_metric_t)(struct perf_stat_config *config,
void *ctx, const char *color, const char *unit,
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index ecd377938eea..b3be5b1d9dbb 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -233,6 +233,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
return NULL;
}
+static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
+{
+ size_t i, phdrnum;
+ u64 sz;
+
+ if (elf_getphdrnum(elf, &phdrnum))
+ return -1;
+
+ for (i = 0; i < phdrnum; i++) {
+ if (gelf_getphdr(elf, i, phdr) == NULL)
+ return -1;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ sz = max(phdr->p_memsz, phdr->p_filesz);
+ if (!sz)
+ continue;
+
+ if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
+ return 0;
+ }
+
+ /* No valid program header found */
+ return -1;
+}
+
static bool want_demangle(bool is_kernel_sym)
{
return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
@@ -1209,6 +1236,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
sym.st_value);
used_opd = true;
}
+
/*
* When loading symbols in a data mapping, ABS symbols (which
* has a value of SHN_ABS in its st_shndx) failed at
@@ -1227,6 +1255,17 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
gelf_getshdr(sec, &shdr);
+ /*
+ * If the attribute bit SHF_ALLOC is not set, the section
+ * doesn't occupy memory during process execution.
+ * E.g. the ".gnu.warning.*" sections are used by the linker to
+ * generate warnings for calls to deprecated functions; their
+ * symbols are never loaded into memory, so skip them.
+ */
+ if (!(shdr.sh_flags & SHF_ALLOC))
+ continue;
+
secstrs = secstrs_sym;
/*
@@ -1262,11 +1301,20 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
goto out_elf_end;
} else if ((used_opd && runtime_ss->adjust_symbols) ||
(!used_opd && syms_ss->adjust_symbols)) {
+ GElf_Phdr phdr;
+
+ if (elf_read_program_header(syms_ss->elf,
+ (u64)sym.st_value, &phdr)) {
+ pr_warning("%s: failed to find program header for "
+ "symbol: %s st_value: %#" PRIx64 "\n",
+ __func__, elf_name, (u64)sym.st_value);
+ continue;
+ }
pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
- "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
- (u64)sym.st_value, (u64)shdr.sh_addr,
- (u64)shdr.sh_offset);
- sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+ "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
+ __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
+ (u64)phdr.p_offset);
+ sym.st_value -= phdr.p_vaddr - phdr.p_offset;
}
demangled = demangle_sym(dso, kmodule, elf_name);
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index a70b3ec09dac..bc3d046fbb63 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -43,7 +43,8 @@ struct symbol_conf {
report_individual_block,
inline_name,
disable_add2line_warn,
- buildid_mmap2;
+ buildid_mmap2,
+ guest_code;
const char *vmlinux_name,
*kallsyms_name,
*source_prefix,
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 27acdc5e5723..84d17bd4efae 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -754,7 +754,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
snprintf(filename, sizeof(filename), "%s/proc/%d/task",
machine->root_dir, pid);
- n = scandir(filename, &dirent, filter_task, alphasort);
+ n = scandir(filename, &dirent, filter_task, NULL);
if (n < 0)
return n;
@@ -767,11 +767,12 @@ static int __event__synthesize_thread(union perf_event *comm_event,
if (*end)
continue;
- rc = -1;
+ /* some threads may exit just after the scan, ignore them */
if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
&tgid, &ppid, &kernel_thread) != 0)
- break;
+ continue;
+ rc = -1;
if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
ppid, process, machine) < 0)
break;
@@ -987,7 +988,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
return 0;
snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
- n = scandir(proc_path, &dirent, filter_task, alphasort);
+ n = scandir(proc_path, &dirent, filter_task, NULL);
if (n < 0)
return err;
diff --git a/tools/perf/util/topdown.c b/tools/perf/util/topdown.c
index 1081b20f9891..a369f84ceb6a 100644
--- a/tools/perf/util/topdown.c
+++ b/tools/perf/util/topdown.c
@@ -1,18 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "pmu.h"
+#include "pmu-hybrid.h"
#include "topdown.h"
-int topdown_filter_events(const char **attr, char **str, bool use_group)
+int topdown_filter_events(const char **attr, char **str, bool use_group,
+ const char *pmu_name)
{
int off = 0;
int i;
int len = 0;
char *s;
+ bool is_hybrid = perf_pmu__is_hybrid(pmu_name);
for (i = 0; attr[i]; i++) {
- if (pmu_have_event("cpu", attr[i])) {
- len += strlen(attr[i]) + 1;
+ if (pmu_have_event(pmu_name, attr[i])) {
+ if (is_hybrid)
+ len += strlen(attr[i]) + strlen(pmu_name) + 3;
+ else
+ len += strlen(attr[i]) + 1;
attr[i - off] = attr[i];
} else
off++;
@@ -30,7 +36,10 @@ int topdown_filter_events(const char **attr, char **str, bool use_group)
if (use_group)
*s++ = '{';
for (i = 0; attr[i]; i++) {
- strcpy(s, attr[i]);
+ if (!is_hybrid)
+ strcpy(s, attr[i]);
+ else
+ sprintf(s, "%s/%s/", pmu_name, attr[i]);
s += strlen(s);
*s++ = ',';
}
diff --git a/tools/perf/util/topdown.h b/tools/perf/util/topdown.h
index 2f0d0b887639..118e75281f93 100644
--- a/tools/perf/util/topdown.h
+++ b/tools/perf/util/topdown.h
@@ -7,6 +7,7 @@ bool arch_topdown_check_group(bool *warn);
void arch_topdown_group_warn(void);
bool arch_topdown_sample_read(struct evsel *leader);
-int topdown_filter_events(const char **attr, char **str, bool use_group);
+int topdown_filter_events(const char **attr, char **str, bool use_group,
+ const char *pmu_name);
#endif
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 41e29fc7648a..81b6bd6e1536 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -169,29 +169,63 @@ static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
__v; \
})
-static u64 elf_section_offset(int fd, const char *name)
+static int elf_section_address_and_offset(int fd, const char *name, u64 *address, u64 *offset)
{
Elf *elf;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
- u64 offset = 0;
+ int ret = -1;
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL)
+ return -1;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out_err;
+
+ if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
+ goto out_err;
+
+ *address = shdr.sh_addr;
+ *offset = shdr.sh_offset;
+ ret = 0;
+out_err:
+ elf_end(elf);
+ return ret;
+}
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static u64 elf_section_offset(int fd, const char *name)
+{
+ u64 address, offset = 0;
+
+ if (elf_section_address_and_offset(fd, name, &address, &offset))
return 0;
- do {
- if (gelf_getehdr(elf, &ehdr) == NULL)
- break;
+ return offset;
+}
+#endif
- if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
- break;
+static u64 elf_base_address(int fd)
+{
+ Elf *elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ GElf_Phdr phdr;
+ u64 retval = 0;
+ size_t i, phdrnum = 0;
- offset = shdr.sh_offset;
- } while (0);
+ if (elf == NULL)
+ return 0;
+ (void)elf_getphdrnum(elf, &phdrnum);
+ /* PT_LOAD segments are sorted by p_vaddr, so the first has the minimum p_vaddr. */
+ for (i = 0; i < phdrnum; i++) {
+ if (gelf_getphdr(elf, i, &phdr) && phdr.p_type == PT_LOAD) {
+ retval = phdr.p_vaddr & -getpagesize();
+ break;
+ }
+ }
elf_end(elf);
- return offset;
+ return retval;
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
@@ -248,8 +282,7 @@ struct eh_frame_hdr {
} __packed;
static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
- u64 offset, u64 *table_data, u64 *segbase,
- u64 *fde_count)
+ u64 offset, u64 *table_data_offset, u64 *fde_count)
{
struct eh_frame_hdr hdr;
u8 *enc = (u8 *) &hdr.enc;
@@ -265,35 +298,47 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
*fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
- *segbase = offset;
- *table_data = (enc - (u8 *) &hdr) + offset;
+ *table_data_offset = enc - (u8 *) &hdr;
return 0;
}
-static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
u64 *table_data, u64 *segbase,
u64 *fde_count)
{
- int ret = -EINVAL, fd;
- u64 offset = dso->data.eh_frame_hdr_offset;
+ struct map *map;
+ u64 base_addr = UINT64_MAX;
+ int ret, fd;
- if (offset == 0) {
- fd = dso__data_get_fd(dso, machine);
+ if (dso->data.eh_frame_hdr_offset == 0) {
+ fd = dso__data_get_fd(dso, ui->machine);
if (fd < 0)
return -EINVAL;
/* Check the .eh_frame section for unwinding info */
- offset = elf_section_offset(fd, ".eh_frame_hdr");
- dso->data.eh_frame_hdr_offset = offset;
+ ret = elf_section_address_and_offset(fd, ".eh_frame_hdr",
+ &dso->data.eh_frame_hdr_addr,
+ &dso->data.eh_frame_hdr_offset);
+ dso->data.elf_base_addr = elf_base_address(fd);
dso__data_put_fd(dso);
+ if (ret || dso->data.eh_frame_hdr_offset == 0)
+ return -EINVAL;
}
- if (offset)
- ret = unwind_spec_ehframe(dso, machine, offset,
- table_data, segbase,
- fde_count);
-
- return ret;
+ maps__for_each_entry(ui->thread->maps, map) {
+ if (map->dso == dso && map->start < base_addr)
+ base_addr = map->start;
+ }
+ base_addr -= dso->data.elf_base_addr;
+ /* Address of .eh_frame_hdr */
+ *segbase = base_addr + dso->data.eh_frame_hdr_addr;
+ ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset,
+ table_data, fde_count);
+ if (ret)
+ return ret;
+ /* binary_search_table offset plus .eh_frame_hdr address */
+ *table_data += *segbase;
+ return 0;
}
#ifndef NO_LIBUNWIND_DEBUG_FRAME
@@ -388,14 +433,14 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
/* Check the .eh_frame section for unwinding info */
- if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+ if (!read_unwind_spec_eh_frame(map->dso, ui,
&table_data, &segbase, &fde_count)) {
memset(&di, 0, sizeof(di));
di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
di.start_ip = map->start;
di.end_ip = map->end;
- di.u.rti.segbase = map->start + segbase - map->pgoff;
- di.u.rti.table_data = map->start + table_data - map->pgoff;
+ di.u.rti.segbase = segbase;
+ di.u.rti.table_data = table_data;
di.u.rti.table_len = fde_count * sizeof(struct table_entry)
/ sizeof(unw_word_t);
ret = dwarf_search_unwind_table(as, ip, &di, pi,
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index f8571a66d063..eeb83c80f458 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -430,6 +430,11 @@ void perf_debuginfod_setup(struct perf_debuginfod *di)
setenv("DEBUGINFOD_URLS", di->urls, 1);
pr_debug("DEBUGINFOD_URLS=%s\n", getenv("DEBUGINFOD_URLS"));
+
+#ifndef HAVE_DEBUGINFOD_SUPPORT
+ if (di->set)
+ pr_warning("WARNING: debuginfod support requested, but perf is not built with it\n");
+#endif
}
/*
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index 185b8c588e1d..38f9b9da8170 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -3,7 +3,7 @@
*
* Module Name: cmfsize - Common get file size function
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 3c265bc917a1..96fd6cec78e2 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -3,7 +3,7 @@
*
* Module Name: getopt
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index ccabdbaae6a4..bd08f36df4a7 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -3,7 +3,7 @@
*
* Module Name: oslinuxtbl - Linux OSL for obtaining ACPI tables
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index edd99274cd12..5107892d054b 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixdir - Unix directory access interfaces
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index fee0022560d5..6ff4edd8dc3b 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixmap - Unix OSL for file mappings
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 0861728da562..b3651a04d68c 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -3,7 +3,7 @@
*
* Module Name: osunixxf - UNIX OSL interfaces
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index e0ebc1dab1cc..153249c87fd7 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -3,7 +3,7 @@
*
* Module Name: acpidump.h - Include file for acpi_dump utility
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 444e3d78bd89..d54dde02b87d 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -3,7 +3,7 @@
*
* Module Name: apdump - Dump routines for ACPI tables (acpidump)
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index da0c6e13042b..2d9b45a9b526 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -3,7 +3,7 @@
*
* Module Name: apfiles - File-related functions for acpidump utility
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index a4cf6042fcfd..44b23fc53dd9 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -3,7 +3,7 @@
*
* Module Name: apmain - Main module for the acpidump utility
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2022, Intel Corp.
*
*****************************************************************************/
diff --git a/tools/power/cpupower/debug/i386/dump_psb.c b/tools/power/cpupower/debug/i386/dump_psb.c
index 2c768cf70128..6fb81b42ea61 100644
--- a/tools/power/cpupower/debug/i386/dump_psb.c
+++ b/tools/power/cpupower/debug/i386/dump_psb.c
@@ -1,7 +1,5 @@
-/*
- * dump_psb. (c) 2004, Dave Jones, Red Hat Inc.
- * Licensed under the GPL v2.
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// dump_psb. (c) 2004, Dave Jones, Red Hat Inc.
#include <fcntl.h>
#include <stdio.h>
diff --git a/tools/power/pm-graph/README b/tools/power/pm-graph/README
index da468bd510ca..e6020c0d59ec 100644
--- a/tools/power/pm-graph/README
+++ b/tools/power/pm-graph/README
@@ -6,7 +6,7 @@
|_| |___/ |_|
pm-graph: suspend/resume/boot timing analysis tools
- Version: 5.8
+ Version: 5.9
Author: Todd Brandt <todd.e.brandt@intel.com>
Home Page: https://01.org/pm-graph
@@ -97,8 +97,8 @@
(kernel/pre-3.15/enable_trace_events_suspend_resume.patch)
(kernel/pre-3.15/enable_trace_events_device_pm_callback.patch)
- If you're using a kernel older than 3.15.0, the following
- additional kernel parameters are required:
+ If you're using bootgraph, or sleepgraph with a kernel older than 3.15.0,
+ the following additional kernel parameters are required:
(e.g. in file /etc/default/grub)
GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=32M ..."
diff --git a/tools/power/pm-graph/bootgraph.py b/tools/power/pm-graph/bootgraph.py
index 2823cd3122f7..f96f50e0c336 100755
--- a/tools/power/pm-graph/bootgraph.py
+++ b/tools/power/pm-graph/bootgraph.py
@@ -69,22 +69,24 @@ class SystemValues(aslib.SystemValues):
bootloader = 'grub'
blexec = []
def __init__(self):
- self.hostname = platform.node()
+ self.kernel, self.hostname = 'unknown', platform.node()
self.testtime = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
if os.path.exists('/proc/version'):
fp = open('/proc/version', 'r')
- val = fp.read().strip()
+ self.kernel = self.kernelVersion(fp.read().strip())
fp.close()
- self.kernel = self.kernelVersion(val)
- else:
- self.kernel = 'unknown'
self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
def kernelVersion(self, msg):
- return msg.split()[2]
+ m = re.match('^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
+ if m:
+ return m.group('v')
+ return 'unknown'
def checkFtraceKernelVersion(self):
- val = tuple(map(int, self.kernel.split('-')[0].split('.')))
- if val >= (4, 10, 0):
- return True
+ m = re.match('^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
+ if m:
+ val = tuple(map(int, m.groups()))
+ if val >= (4, 10, 0):
+ return True
return False
def kernelParams(self):
cmdline = 'initcall_debug log_buf_len=32M'
diff --git a/tools/power/pm-graph/config/custom-timeline-functions.cfg b/tools/power/pm-graph/config/custom-timeline-functions.cfg
index 962e5768681c..4f80ad7d7275 100644
--- a/tools/power/pm-graph/config/custom-timeline-functions.cfg
+++ b/tools/power/pm-graph/config/custom-timeline-functions.cfg
@@ -125,7 +125,7 @@ acpi_suspend_begin:
suspend_console:
acpi_pm_prepare:
syscore_suspend:
-arch_thaw_secondary_cpus_end:
+arch_enable_nonboot_cpus_end:
syscore_resume:
acpi_pm_finish:
resume_console:
diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py
index ffd50953a024..33981adcdd68 100755
--- a/tools/power/pm-graph/sleepgraph.py
+++ b/tools/power/pm-graph/sleepgraph.py
@@ -66,8 +66,13 @@ from threading import Thread
from subprocess import call, Popen, PIPE
import base64
+debugtiming = False
+mystarttime = time.time()
def pprint(msg):
- print(msg)
+ if debugtiming:
+ print('[%09.3f] %s' % (time.time()-mystarttime, msg))
+ else:
+ print(msg)
sys.stdout.flush()
def ascii(text):
@@ -81,13 +86,14 @@ def ascii(text):
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
- version = '5.8'
+ version = '5.9'
ansi = False
rs = 0
display = ''
gzip = False
sync = False
wifi = False
+ netfix = False
verbose = False
testlog = True
dmesglog = True
@@ -108,6 +114,7 @@ class SystemValues:
cpucount = 0
memtotal = 204800
memfree = 204800
+ osversion = ''
srgap = 0
cgexp = False
testdir = ''
@@ -116,6 +123,7 @@ class SystemValues:
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
pmdpath = '/sys/power/pm_debug_messages'
+ s0ixpath = '/sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures'
acpipath='/sys/module/acpi/parameters/debug_level'
traceevents = [
'suspend_resume',
@@ -156,6 +164,7 @@ class SystemValues:
ftop = False
usetraceevents = False
usetracemarkers = True
+ useftrace = True
usekprobes = True
usedevsrc = False
useprocmon = False
@@ -279,10 +288,16 @@ class SystemValues:
'intel_fbdev_set_suspend': {},
}
infocmds = [
+ [0, 'sysinfo', 'uname', '-a'],
+ [0, 'cpuinfo', 'head', '-7', '/proc/cpuinfo'],
[0, 'kparams', 'cat', '/proc/cmdline'],
[0, 'mcelog', 'mcelog'],
[0, 'pcidevices', 'lspci', '-tv'],
- [0, 'usbdevices', 'lsusb', '-t'],
+ [0, 'usbdevices', 'lsusb', '-tv'],
+ [0, 'acpidevices', 'sh', '-c', 'ls -l /sys/bus/acpi/devices/*/physical_node'],
+ [0, 's0ix_require', 'cat', '/sys/kernel/debug/pmc_core/substate_requirements'],
+ [0, 's0ix_debug', 'cat', '/sys/kernel/debug/pmc_core/slp_s0_debug_status'],
+ [1, 's0ix_residency', 'cat', '/sys/kernel/debug/pmc_core/slp_s0_residency_usec'],
[1, 'interrupts', 'cat', '/proc/interrupts'],
[1, 'wakeups', 'cat', '/sys/kernel/debug/wakeup_sources'],
[2, 'gpecounts', 'sh', '-c', 'grep -v invalid /sys/firmware/acpi/interrupts/*'],
@@ -358,8 +373,19 @@ class SystemValues:
self.outputResult({'error':msg})
sys.exit(1)
return False
- def usable(self, file):
- return (os.path.exists(file) and os.path.getsize(file) > 0)
+ def usable(self, file, ishtml=False):
+ if not os.path.exists(file) or os.path.getsize(file) < 1:
+ return False
+ if ishtml:
+ try:
+ fp = open(file, 'r')
+ res = fp.read(1000)
+ fp.close()
+ except:
+ return False
+ if '<html>' not in res:
+ return False
+ return True
def getExec(self, cmd):
try:
fp = Popen(['which', cmd], stdout=PIPE, stderr=PIPE).stdout
@@ -413,12 +439,16 @@ class SystemValues:
r = info['bios-release-date'] if 'bios-release-date' in info else ''
self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | biosdate:%s | numcpu:%d | memsz:%d | memfr:%d' % \
(m, p, c, b, r, self.cpucount, self.memtotal, self.memfree)
+ if self.osversion:
+ self.sysstamp += ' | os:%s' % self.osversion
def printSystemInfo(self, fatal=False):
self.rootCheck(True)
out = dmidecode(self.mempath, fatal)
if len(out) < 1:
return
fmt = '%-24s: %s'
+ if self.osversion:
+ print(fmt % ('os-version', self.osversion))
for name in sorted(out):
print(fmt % (name, out[name]))
print(fmt % ('cpucount', ('%d' % self.cpucount)))
@@ -426,20 +456,25 @@ class SystemValues:
print(fmt % ('memfree', ('%d kB' % self.memfree)))
def cpuInfo(self):
self.cpucount = 0
- fp = open('/proc/cpuinfo', 'r')
- for line in fp:
- if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
- self.cpucount += 1
- fp.close()
- fp = open('/proc/meminfo', 'r')
- for line in fp:
- m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
- if m:
- self.memtotal = int(m.group('sz'))
- m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
- if m:
- self.memfree = int(m.group('sz'))
- fp.close()
+ if os.path.exists('/proc/cpuinfo'):
+ with open('/proc/cpuinfo', 'r') as fp:
+ for line in fp:
+ if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
+ self.cpucount += 1
+ if os.path.exists('/proc/meminfo'):
+ with open('/proc/meminfo', 'r') as fp:
+ for line in fp:
+ m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ if m:
+ self.memtotal = int(m.group('sz'))
+ m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
+ if m:
+ self.memfree = int(m.group('sz'))
+ if os.path.exists('/etc/os-release'):
+ with open('/etc/os-release', 'r') as fp:
+ for line in fp:
+ if line.startswith('PRETTY_NAME='):
+ self.osversion = line[12:].strip().replace('"', '')
def initTestOutput(self, name):
self.prefix = self.hostname
v = open('/proc/version', 'r').read().strip()
@@ -698,6 +733,8 @@ class SystemValues:
return False
return True
def fsetVal(self, val, path):
+ if not self.useftrace:
+ return False
return self.setVal(val, self.tpath+path)
def getVal(self, file):
res = ''
@@ -711,9 +748,11 @@ class SystemValues:
pass
return res
def fgetVal(self, path):
+ if not self.useftrace:
+ return ''
return self.getVal(self.tpath+path)
def cleanupFtrace(self):
- if(self.usecallgraph or self.usetraceevents or self.usedevsrc):
+ if self.useftrace:
self.fsetVal('0', 'events/kprobes/enable')
self.fsetVal('', 'kprobe_events')
self.fsetVal('1024', 'buffer_size_kb')
@@ -734,13 +773,14 @@ class SystemValues:
return True
return False
def initFtrace(self, quiet=False):
+ if not self.useftrace:
+ return
if not quiet:
sysvals.printSystemInfo(False)
pprint('INITIALIZING FTRACE...')
# turn trace off
self.fsetVal('0', 'tracing_on')
self.cleanupFtrace()
- self.testVal(self.pmdpath, 'basic', '1')
# set the trace clock to global
self.fsetVal('global', 'trace_clock')
self.fsetVal('nop', 'current_tracer')
@@ -766,6 +806,10 @@ class SystemValues:
# set trace type
self.fsetVal('function_graph', 'current_tracer')
self.fsetVal('', 'set_ftrace_filter')
+ # temporary hack to fix https://bugzilla.kernel.org/show_bug.cgi?id=212761
+ fp = open(self.tpath+'set_ftrace_notrace', 'w')
+ fp.write('native_queued_spin_lock_slowpath\ndev_driver_string')
+ fp.close()
# set trace format options
self.fsetVal('print-parent', 'trace_options')
self.fsetVal('funcgraph-abstime', 'trace_options')
@@ -846,6 +890,8 @@ class SystemValues:
fp.write('# turbostat %s\n' % test['turbo'])
if 'wifi' in test:
fp.write('# wifi %s\n' % test['wifi'])
+ if 'netfix' in test:
+ fp.write('# netfix %s\n' % test['netfix'])
if test['error'] or len(testdata) > 1:
fp.write('# enter_sleep_error %s\n' % test['error'])
return fp
@@ -865,6 +911,8 @@ class SystemValues:
fp.write('error%s: %s\n' % (n, testdata['error']))
else:
fp.write('result%s: pass\n' % n)
+ if 'mode' in testdata:
+ fp.write('mode%s: %s\n' % (n, testdata['mode']))
for v in ['suspend', 'resume', 'boot', 'lastinit']:
if v in testdata:
fp.write('%s%s: %.3f\n' % (v, n, testdata[v]))
@@ -901,6 +949,8 @@ class SystemValues:
fp.write(text)
fp.close()
def dlog(self, text):
+ if not self.dmesgfile:
+ return
self.putlog(self.dmesgfile, '# %s\n' % text)
def flog(self, text):
self.putlog(self.ftracefile, text)
@@ -954,34 +1004,31 @@ class SystemValues:
dirname = props[dev].syspath
if not dirname or not os.path.exists(dirname):
continue
- with open(dirname+'/power/async') as fp:
- text = fp.read()
- props[dev].isasync = False
- if 'enabled' in text:
+ props[dev].isasync = False
+ if os.path.exists(dirname+'/power/async'):
+ fp = open(dirname+'/power/async')
+ if 'enabled' in fp.read():
props[dev].isasync = True
+ fp.close()
fields = os.listdir(dirname)
- if 'product' in fields:
- with open(dirname+'/product', 'rb') as fp:
- props[dev].altname = ascii(fp.read())
- elif 'name' in fields:
- with open(dirname+'/name', 'rb') as fp:
- props[dev].altname = ascii(fp.read())
- elif 'model' in fields:
- with open(dirname+'/model', 'rb') as fp:
- props[dev].altname = ascii(fp.read())
- elif 'description' in fields:
- with open(dirname+'/description', 'rb') as fp:
- props[dev].altname = ascii(fp.read())
- elif 'id' in fields:
- with open(dirname+'/id', 'rb') as fp:
- props[dev].altname = ascii(fp.read())
- elif 'idVendor' in fields and 'idProduct' in fields:
- idv, idp = '', ''
- with open(dirname+'/idVendor', 'rb') as fp:
- idv = ascii(fp.read()).strip()
- with open(dirname+'/idProduct', 'rb') as fp:
- idp = ascii(fp.read()).strip()
- props[dev].altname = '%s:%s' % (idv, idp)
+ for file in ['product', 'name', 'model', 'description', 'id', 'idVendor']:
+ if file not in fields:
+ continue
+ try:
+ with open(os.path.join(dirname, file), 'rb') as fp:
+ props[dev].altname = ascii(fp.read())
+ except:
+ continue
+ if file == 'idVendor':
+ idv, idp = props[dev].altname.strip(), ''
+ try:
+ with open(os.path.join(dirname, 'idProduct'), 'rb') as fp:
+ idp = ascii(fp.read()).strip()
+ except:
+ props[dev].altname = ''
+ break
+ props[dev].altname = '%s:%s' % (idv, idp)
+ break
if props[dev].altname:
out = props[dev].altname.strip().replace('\n', ' ')\
.replace(',', ' ').replace(';', ' ')
@@ -1047,7 +1094,7 @@ class SystemValues:
self.cmd1[name] = self.dictify(info, delta)
elif not debug and delta and name in self.cmd1:
before, after = self.cmd1[name], self.dictify(info, delta)
- dinfo = ('\t%s\n' % before['@']) if '@' in before else ''
+ dinfo = ('\t%s\n' % before['@']) if '@' in before and len(before) > 1 else ''
prefix = self.commonPrefix(list(before.keys()))
for key in sorted(before):
if key in after and before[key] != after[key]:
@@ -1128,6 +1175,22 @@ class SystemValues:
val = valline[idx]
out.append('%s=%s' % (key, val))
return '|'.join(out)
+ def netfixon(self, net='both'):
+ cmd = self.getExec('netfix')
+ if not cmd:
+ return ''
+ fp = Popen([cmd, '-s', net, 'on'], stdout=PIPE, stderr=PIPE).stdout
+ out = ascii(fp.read()).strip()
+ fp.close()
+ return out
+ def wifiRepair(self):
+ out = self.netfixon('wifi')
+ if not out or 'error' in out.lower():
+ return ''
+ m = re.match('WIFI \S* ONLINE (?P<action>\S*)', out)
+ if not m:
+ return 'dead'
+ return m.group('action')
def wifiDetails(self, dev):
try:
info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip()
@@ -1144,12 +1207,12 @@ class SystemValues:
except:
return ''
for line in reversed(w.split('\n')):
- m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', w.split('\n')[-1])
+ m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
if not m or (dev and dev != m.group('dev')):
continue
return m.group('dev')
return ''
- def pollWifi(self, dev, timeout=60):
+ def pollWifi(self, dev, timeout=10):
start = time.time()
while (time.time() - start) < timeout:
w = self.checkWifi(dev)
@@ -1157,6 +1220,11 @@ class SystemValues:
return '%s reconnected %.2f' % \
(self.wifiDetails(dev), max(0, time.time() - start))
time.sleep(0.01)
+ if self.netfix:
+ res = self.wifiRepair()
+ if res:
+ timeout = max(0, time.time() - start)
+ return '%s %s %d' % (self.wifiDetails(dev), res, timeout)
return '%s timeout %d' % (self.wifiDetails(dev), timeout)
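Illustrative outcomes of the new fallback (device, driver, action names, and timings are all hypothetical):

    # assuming a wifi device 'wlan0' driven by iwlwifi, with sysvals.netfix set:
    # sysvals.pollWifi('wlan0')
    #   -> 'wlan0:iwlwifi reconnected 1.25'  (device came back on its own)
    #   -> 'wlan0:iwlwifi enabled 10'        (wifiRepair brought it back)
    #   -> 'wlan0:iwlwifi dead 10'           (netfix ran, device stayed offline)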
def errorSummary(self, errinfo, msg):
found = False
@@ -1283,10 +1351,10 @@ sysvals = SystemValues()
switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0']
switchoff = ['disable', 'off', 'false', '0']
suspendmodename = {
- 'freeze': 'Freeze (S0)',
- 'standby': 'Standby (S1)',
- 'mem': 'Suspend (S3)',
- 'disk': 'Hibernate (S4)'
+ 'standby': 'standby (S1)',
+ 'freeze': 'freeze (S2idle)',
+ 'mem': 'suspend (S3)',
+ 'disk': 'hibernate (S4)'
}
# Class: DevProps
@@ -1376,6 +1444,7 @@ class Data:
'INVALID' : r'(?i).*\bINVALID\b.*',
'CRASH' : r'(?i).*\bCRASHED\b.*',
'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*',
+ 'ABORT' : r'(?i).*\bABORT\b.*',
'IRQ' : r'.*\bgenirq: .*',
'TASKFAIL': r'.*Freezing of tasks *.*',
'ACPI' : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*',
@@ -1724,9 +1793,9 @@ class Data:
if 'waking' in self.dmesg[lp]:
tCnt = self.dmesg[lp]['waking'][0]
if self.dmesg[lp]['waking'][1] >= 0.001:
- tTry = '-%.0f' % (round(self.dmesg[lp]['waking'][1] * 1000))
+ tTry = '%.0f' % (round(self.dmesg[lp]['waking'][1] * 1000))
else:
- tTry = '-%.3f' % (self.dmesg[lp]['waking'][1] * 1000)
+ tTry = '%.3f' % (self.dmesg[lp]['waking'][1] * 1000)
text = '%.0f (%s ms waking %d times)' % (tL * 1000, tTry, tCnt)
else:
text = '%.0f' % (tL * 1000)
@@ -2107,6 +2176,30 @@ class Data:
# set resume complete to end at end marker
if 'resume_complete' in dm:
dm['resume_complete']['end'] = time
+ def initcall_debug_call(self, line, quick=False):
+ m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ 'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ if not m:
+ m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ 'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
+ if not m:
+ m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
+ '(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
+ if m:
+ return True if quick else m.group('t', 'f', 'n', 'p')
+ return False if quick else ('', '', '', '')
+ def initcall_debug_return(self, line, quick=False):
+ m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
+ '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ if not m:
+ m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
+ '.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
+ if not m:
+ m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
+ '(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
+ if m:
+ return True if quick else m.group('t', 'f', 'dt')
+ return False if quick else ('', '', '')
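For reference, a sketch of what these helpers return, given a Data instance data and illustrative initcall_debug-style lines (not captured output):

    call_line = '[   12.345678] calling usb1+ @ 234, parent: pci0000:00'
    ret_line = '[   12.349999] call usb1+ returned 0 after 4321 usecs'
    t, f, n, p = data.initcall_debug_call(call_line)  # ('12.345678', 'usb1', '234', 'pci0000:00')
    t, f, dt = data.initcall_debug_return(ret_line)   # ('12.349999', 'usb1', '4321')
    data.initcall_debug_call('unrelated line', True)  # quick mode -> False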
def debugPrint(self):
for p in self.sortedPhases():
list = self.dmesg[p]['list']
@@ -2880,10 +2973,11 @@ class TestProps:
cmdlinefmt = '^# command \| (?P<cmd>.*)'
kparamsfmt = '^# kparams \| (?P<kp>.*)'
devpropfmt = '# Device Properties: .*'
- pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9]*): (?P<info>.*)'
+ pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
tracertypefmt = '# tracer: (?P<t>.*)'
firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
procexecfmt = 'ps - (?P<ps>.*)$'
+ procmultifmt = '@(?P<n>[0-9]*)\|(?P<ps>.*)$'
ftrace_line_fmt_fg = \
'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
@@ -2893,6 +2987,9 @@ class TestProps:
'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
'(?P<msg>.*)'
machinesuspend = 'machine_suspend\[.*'
+ multiproclist = dict()
+ multiproctime = 0.0
+ multiproccnt = 0
def __init__(self):
self.stamp = ''
self.sysinfo = ''
@@ -3063,6 +3160,7 @@ class TestRun:
self.ttemp = dict()
class ProcessMonitor:
+ maxchars = 512
def __init__(self):
self.proclist = dict()
self.running = False
@@ -3088,19 +3186,23 @@ class ProcessMonitor:
if ujiff > 0 or kjiff > 0:
running[pid] = ujiff + kjiff
process.wait()
- out = ''
+ out = ['']
for pid in running:
jiffies = running[pid]
val = self.proclist[pid]
- if out:
- out += ','
- out += '%s-%s %d' % (val['name'], pid, jiffies)
- return 'ps - '+out
+ if len(out[-1]) > self.maxchars:
+ out.append('')
+ elif len(out[-1]) > 0:
+ out[-1] += ','
+ out[-1] += '%s-%s %d' % (val['name'], pid, jiffies)
+ if len(out) > 1:
+ for line in out:
+ sysvals.fsetVal('ps - @%d|%s' % (len(out), line), 'trace_marker')
+ else:
+ sysvals.fsetVal('ps - %s' % out[0], 'trace_marker')
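procstat now writes the markers itself, splitting long process lists into chunks so a single write stays within the ftrace trace_marker buffer; multi-part writes carry a '@N|' prefix that parseTraceLog reassembles. A toy sketch of the chunking rule, assuming maxchars = 512:

    entries = ['task-%d 3' % pid for pid in range(2000, 2100)]  # hypothetical stats
    out = ['']
    for e in entries:
        if len(out[-1]) > 512:    # current chunk full, start another
            out.append('')
        elif out[-1]:             # separate entries within a chunk
            out[-1] += ','
        out[-1] += e
    if len(out) > 1:
        markers = ['ps - @%d|%s' % (len(out), c) for c in out]
    else:
        markers = ['ps - %s' % out[0]]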
def processMonitor(self, tid):
while self.running:
- out = self.procstat()
- if out:
- sysvals.fsetVal(out, 'trace_marker')
+ self.procstat()
def start(self):
self.thread = Thread(target=self.processMonitor, args=(0,))
self.running = True
@@ -3144,7 +3246,6 @@ def doesTraceLogHaveTraceEvents():
# Function: appendIncompleteTraceLog
# Description:
-# [deprecated for kernel 3.15 or newer]
# Adds callgraph data which lacks trace event data. This is only
# for timelines generated from 3.15 or older
# Arguments:
@@ -3246,6 +3347,61 @@ def appendIncompleteTraceLog(testruns):
dev['ftrace'] = cg
break
+# Function: loadTraceLog
+# Description:
+# load the ftrace file into memory and fix up any ordering issues
+# Output:
+# TestProps instance and an array of lines in proper order
+def loadTraceLog():
+ tp, data, lines, trace = TestProps(), dict(), [], []
+ tf = sysvals.openlog(sysvals.ftracefile, 'r')
+ for line in tf:
+ # remove any latent carriage returns
+ line = line.replace('\r\n', '')
+ if tp.stampInfo(line, sysvals):
+ continue
+ # ignore all other commented lines
+ if line[0] == '#':
+ continue
+ # ftrace line: parse only valid lines
+ m = re.match(tp.ftrace_line_fmt, line)
+ if(not m):
+ continue
+ dur = m.group('dur') if tp.cgformat else 'traceevent'
+ info = (m.group('time'), m.group('proc'), m.group('pid'),
+ m.group('msg'), dur)
+ # group the data by timestamp
+ t = float(info[0])
+ if t in data:
+ data[t].append(info)
+ else:
+ data[t] = [info]
+ # we only care about trace event ordering
+ if (info[3].startswith('suspend_resume:') or \
+ info[3].startswith('tracing_mark_write:')) and t not in trace:
+ trace.append(t)
+ tf.close()
+ for t in sorted(data):
+ first, last, blk = [], [], data[t]
+ if len(blk) > 1 and t in trace:
+ # move certain lines to the start or end of a timestamp block
+ for i in range(len(blk)):
+ if 'SUSPEND START' in blk[i][3]:
+ first.append(i)
+ elif re.match('.* timekeeping_freeze.*begin', blk[i][3]):
+ last.append(i)
+ elif re.match('.* timekeeping_freeze.*end', blk[i][3]):
+ first.append(i)
+ elif 'RESUME COMPLETE' in blk[i][3]:
+ last.append(i)
+ if len(first) == 1 and len(last) == 0:
+ blk.insert(0, blk.pop(first[0]))
+ elif len(last) == 1 and len(first) == 0:
+ blk.append(blk.pop(last[0]))
+ for info in blk:
+ lines.append(info)
+ return (tp, lines)
+
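The reordering pass only rearranges lines that share a timestamp with a trace event: a lone start-style marker ('SUSPEND START', timekeeping_freeze end) is popped to the front of its block, a lone end-style marker to the back. A toy illustration with fabricated tuples:

    blk = [('100.000001', 'rtcwake', '815', 'suspend_resume: suspend_enter[3] begin', 'traceevent'),
           ('100.000001', 'rtcwake', '815', 'tracing_mark_write: SUSPEND START', 'traceevent')]
    first, last = [1], []             # index 1 matched the 'SUSPEND START' rule
    blk.insert(0, blk.pop(first[0]))  # the start marker now precedes its peers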
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
@@ -3271,32 +3427,12 @@ def parseTraceLog(live=False):
# extract the callgraph and traceevent data
s2idle_enter = hwsus = False
- tp = TestProps()
testruns, testdata = [], []
testrun, data, limbo = 0, 0, True
- tf = sysvals.openlog(sysvals.ftracefile, 'r')
phase = 'suspend_prepare'
- for line in tf:
- # remove any latent carriage returns
- line = line.replace('\r\n', '')
- if tp.stampInfo(line, sysvals):
- continue
- # ignore all other commented lines
- if line[0] == '#':
- continue
- # ftrace line: parse only valid lines
- m = re.match(tp.ftrace_line_fmt, line)
- if(not m):
- continue
+ tp, tf = loadTraceLog()
+ for m_time, m_proc, m_pid, m_msg, m_param3 in tf:
# gather the basic message data from the line
- m_time = m.group('time')
- m_proc = m.group('proc')
- m_pid = m.group('pid')
- m_msg = m.group('msg')
- if(tp.cgformat):
- m_param3 = m.group('dur')
- else:
- m_param3 = 'traceevent'
if(m_time and m_pid and m_msg):
t = FTraceLine(m_time, m_msg, m_param3)
pid = int(m_pid)
@@ -3322,14 +3458,29 @@ def parseTraceLog(live=False):
if t.type == 'tracing_mark_write':
m = re.match(tp.procexecfmt, t.name)
if(m):
- proclist = dict()
- for ps in m.group('ps').split(','):
+ parts, msg = 1, m.group('ps')
+ m = re.match(tp.procmultifmt, msg)
+ if(m):
+ parts, msg = int(m.group('n')), m.group('ps')
+ if tp.multiproccnt == 0:
+ tp.multiproctime = t.time
+ tp.multiproclist = dict()
+ proclist = tp.multiproclist
+ tp.multiproccnt += 1
+ else:
+ proclist = dict()
+ tp.multiproccnt = 0
+ for ps in msg.split(','):
val = ps.split()
- if not val:
+ if not val or len(val) != 2:
continue
name = val[0].replace('--', '-')
proclist[name] = int(val[1])
- data.pstl[t.time] = proclist
+ if parts == 1:
+ data.pstl[t.time] = proclist
+ elif parts == tp.multiproccnt:
+ data.pstl[tp.multiproctime] = proclist
+ tp.multiproccnt = 0
continue
# find the end of resume
if(t.endMarker()):
@@ -3545,7 +3696,6 @@ def parseTraceLog(live=False):
testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
if(res == -1):
testrun.ftemp[key][-1].addLine(t)
- tf.close()
if len(testdata) < 1:
sysvals.vprint('WARNING: ftrace start marker is missing')
if data and not data.devicegroups:
@@ -3667,7 +3817,13 @@ def parseTraceLog(live=False):
if p not in data.dmesg:
if not terr:
ph = p if 'machine' in p else lp
- terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, ph)
+ if p == 'suspend_machine':
+ sm = sysvals.suspendmode
+ if sm in suspendmodename:
+ sm = suspendmodename[sm]
+ terr = 'test%s did not enter %s power mode' % (tn, sm)
+ else:
+ terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, ph)
pprint('TEST%s FAILED: %s' % (tn, terr))
error.append(terr)
if data.tSuspended == 0:
@@ -3708,9 +3864,7 @@ def parseTraceLog(live=False):
# Function: loadKernelLog
# Description:
-# [deprecated for kernel 3.15.0 or newer]
# load the dmesg file into memory and fix up any ordering issues
-# The dmesg filename is taken from sysvals
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
@@ -3736,7 +3890,8 @@ def loadKernelLog():
if(not m):
continue
msg = m.group("msg")
- if(re.match('PM: Syncing filesystems.*', msg)):
+ if re.match('PM: Syncing filesystems.*', msg) or \
+ re.match('PM: suspend entry.*', msg):
if(data):
testruns.append(data)
data = Data(len(testruns))
@@ -3747,11 +3902,17 @@ def loadKernelLog():
if(m):
sysvals.stamp['kernel'] = m.group('k')
m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
- if(m):
+ if not m:
+ m = re.match('PM: Preparing system for sleep \((?P<m>.*)\)', msg)
+ if m:
sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
data.dmesgtext.append(line)
lf.close()
+ if sysvals.suspendmode == 's2idle':
+ sysvals.suspendmode = 'freeze'
+ elif sysvals.suspendmode == 'deep':
+ sysvals.suspendmode = 'mem'
if data:
testruns.append(data)
if len(testruns) < 1:
@@ -3762,12 +3923,9 @@ def loadKernelLog():
for data in testruns:
last = ''
for line in data.dmesgtext:
- mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
- '(?P<f>.*)\+ @ .*, parent: .*', line)
- mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
- '(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last)
- if(mc and mr and (mc.group('t') == mr.group('t')) and
- (mc.group('f') == mr.group('f'))):
+ ct, cf, n, p = data.initcall_debug_call(line)
+ rt, rf, l = data.initcall_debug_return(last)
+ if ct and rt and ct == rt and cf == rf:
i = data.dmesgtext.index(last)
j = data.dmesgtext.index(line)
data.dmesgtext[i] = line
@@ -3777,7 +3935,6 @@ def loadKernelLog():
# Function: parseKernelLog
# Description:
-# [deprecated for kernel 3.15.0 or newer]
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
@@ -3796,30 +3953,30 @@ def parseKernelLog(data):
# dmesg phase match table
dm = {
- 'suspend_prepare': ['PM: Syncing filesystems.*'],
- 'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*'],
- 'suspend_late': ['PM: suspend of devices complete after.*'],
- 'suspend_noirq': ['PM: late suspend of devices complete after.*'],
- 'suspend_machine': ['PM: noirq suspend of devices complete after.*'],
- 'resume_machine': ['ACPI: Low-level resume complete.*'],
- 'resume_noirq': ['ACPI: Waking up from system sleep state.*'],
- 'resume_early': ['PM: noirq resume of devices complete after.*'],
- 'resume': ['PM: early resume of devices complete after.*'],
- 'resume_complete': ['PM: resume of devices complete after.*'],
+ 'suspend_prepare': ['PM: Syncing filesystems.*', 'PM: suspend entry.*'],
+ 'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*',
+ 'PM: Suspending system .*'],
+ 'suspend_late': ['PM: suspend of devices complete after.*',
+ 'PM: freeze of devices complete after.*'],
+ 'suspend_noirq': ['PM: late suspend of devices complete after.*',
+ 'PM: late freeze of devices complete after.*'],
+ 'suspend_machine': ['PM: suspend-to-idle',
+ 'PM: noirq suspend of devices complete after.*',
+ 'PM: noirq freeze of devices complete after.*'],
+ 'resume_machine': ['PM: Timekeeping suspended for.*',
+ 'ACPI: Low-level resume complete.*',
+ 'ACPI: resume from mwait',
+ 'Suspended for [0-9\.]* seconds'],
+ 'resume_noirq': ['PM: resume from suspend-to-idle',
+ 'ACPI: Waking up from system sleep state.*'],
+ 'resume_early': ['PM: noirq resume of devices complete after.*',
+ 'PM: noirq restore of devices complete after.*'],
+ 'resume': ['PM: early resume of devices complete after.*',
+ 'PM: early restore of devices complete after.*'],
+ 'resume_complete': ['PM: resume of devices complete after.*',
+ 'PM: restore of devices complete after.*'],
'post_resume': ['.*Restarting tasks \.\.\..*'],
}
- if(sysvals.suspendmode == 'standby'):
- dm['resume_machine'] = ['PM: Restoring platform NVS memory']
- elif(sysvals.suspendmode == 'disk'):
- dm['suspend_late'] = ['PM: freeze of devices complete after.*']
- dm['suspend_noirq'] = ['PM: late freeze of devices complete after.*']
- dm['suspend_machine'] = ['PM: noirq freeze of devices complete after.*']
- dm['resume_machine'] = ['PM: Restoring platform NVS memory']
- dm['resume_early'] = ['PM: noirq restore of devices complete after.*']
- dm['resume'] = ['PM: early restore of devices complete after.*']
- dm['resume_complete'] = ['PM: restore of devices complete after.*']
- elif(sysvals.suspendmode == 'freeze'):
- dm['resume_machine'] = ['ACPI: resume from mwait']
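With the per-mode overrides gone, the single table above covers every suspend flavor; the first pattern that matches a dmesg line selects the phase, and (in a later hunk) dm[p] is narrowed to that one pattern so subsequent lines match faster. Roughly, for a given message:

    # sketch of the phase scan, assuming the dm table above is in scope
    msg = 'PM: noirq suspend of devices complete after 123.456 msecs'
    for p in dm:
        if any(re.match(s, msg) for s in dm[p]):
            phase = p  # -> 'suspend_machine'
            break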
# action table (expected events that occur and show up in dmesg)
at = {
@@ -3867,12 +4024,13 @@ def parseKernelLog(data):
for s in dm[p]:
if(re.match(s, msg)):
phasechange, phase = True, p
+ dm[p] = [s]
break
# hack for determining resume_machine end for freeze
if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
and phase == 'resume_machine' and \
- re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
+ data.initcall_debug_call(line, True)):
data.setPhase(phase, ktime, False)
phase = 'resume_noirq'
data.setPhase(phase, ktime, True)
@@ -3945,26 +4103,18 @@ def parseKernelLog(data):
# -- device callbacks --
if(phase in data.sortedPhases()):
# device init call
- if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)):
- sm = re.match('calling (?P<f>.*)\+ @ '+\
- '(?P<n>.*), parent: (?P<p>.*)', msg);
- f = sm.group('f')
- n = sm.group('n')
- p = sm.group('p')
- if(f and n and p):
- data.newAction(phase, f, int(n), p, ktime, -1, '')
- # device init return
- elif(re.match('call (?P<f>.*)\+ returned .* after '+\
- '(?P<t>.*) usecs', msg)):
- sm = re.match('call (?P<f>.*)\+ returned .* after '+\
- '(?P<t>.*) usecs(?P<a>.*)', msg);
- f = sm.group('f')
- t = sm.group('t')
- list = data.dmesg[phase]['list']
- if(f in list):
- dev = list[f]
- dev['length'] = int(t)
- dev['end'] = ktime
+ t, f, n, p = data.initcall_debug_call(line)
+ if t and f and n and p:
+ data.newAction(phase, f, int(n), p, ktime, -1, '')
+ else:
+ # device init return
+ t, f, l = data.initcall_debug_return(line)
+ if t and f and l:
+ list = data.dmesg[phase]['list']
+ if(f in list):
+ dev = list[f]
+ dev['length'] = int(l)
+ dev['end'] = ktime
# if trace events are not available, these are better than nothing
if(not sysvals.usetraceevents):
@@ -4006,6 +4156,8 @@ def parseKernelLog(data):
# fill in any missing phases
phasedef = data.phasedef
terr, lp = '', 'suspend_prepare'
+ if lp not in data.dmesg:
+ doError('dmesg log format has changed, could not find start of suspend')
for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
if p not in data.dmesg:
if not terr:
@@ -5302,7 +5454,7 @@ def executeSuspend(quiet=False):
sv.dlog('read dmesg')
sv.initdmesg()
# start ftrace
- if(sv.usecallgraph or sv.usetraceevents):
+ if sv.useftrace:
if not quiet:
pprint('START TRACING')
sv.dlog('start ftrace tracing')
@@ -5334,8 +5486,7 @@ def executeSuspend(quiet=False):
sv.dlog('enable RTC wake alarm')
sv.rtcWakeAlarmOn()
# start of suspend trace marker
- if(sv.usecallgraph or sv.usetraceevents):
- sv.fsetVal(datetime.now().strftime(sv.tmstart), 'trace_marker')
+ sv.fsetVal(datetime.now().strftime(sv.tmstart), 'trace_marker')
# predelay delay
if(count == 1 and sv.predelay > 0):
sv.fsetVal('WAIT %d' % sv.predelay, 'trace_marker')
@@ -5384,11 +5535,17 @@ def executeSuspend(quiet=False):
sv.fsetVal('WAIT END', 'trace_marker')
# return from suspend
pprint('RESUME COMPLETE')
- if(sv.usecallgraph or sv.usetraceevents):
- sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
+ sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
if sv.wifi and wifi:
tdata['wifi'] = sv.pollWifi(wifi)
sv.dlog('wifi check, %s' % tdata['wifi'])
+ if sv.netfix:
+     netfixout = sv.netfixon('wired')
+     if netfixout:
+         tdata['netfix'] = netfixout
+         sv.dlog('netfix, %s' % tdata['netfix'])
if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'):
sv.dlog('read the ACPI FPDT')
tdata['fw'] = getFPDT(False)
@@ -5396,7 +5553,7 @@ def executeSuspend(quiet=False):
sv.dlog('run the cmdinfo list after')
cmdafter = sv.cmdinfo(False)
# stop ftrace
- if(sv.usecallgraph or sv.usetraceevents):
+ if sv.useftrace:
if sv.useprocmon:
sv.dlog('stop the process monitor')
pm.stop()
@@ -5407,7 +5564,7 @@ def executeSuspend(quiet=False):
sysvals.dlog('EXECUTION TRACE END')
sv.getdmesg(testdata)
# grab a copy of the ftrace output
- if(sv.usecallgraph or sv.usetraceevents):
+ if sv.useftrace:
if not quiet:
pprint('CAPTURING TRACE')
op = sv.writeDatafileHeader(sv.ftracefile, testdata)
@@ -5838,13 +5995,19 @@ def statusCheck(probecheck=False):
pprint(' please choose one with -m')
# check if ftrace is available
- res = sysvals.colorText('NO')
- ftgood = sysvals.verifyFtrace()
- if(ftgood):
- res = 'YES'
- elif(sysvals.usecallgraph):
- status = 'ftrace is not properly supported'
- pprint(' is ftrace supported: %s' % res)
+ if sysvals.useftrace:
+ res = sysvals.colorText('NO')
+ sysvals.useftrace = sysvals.verifyFtrace()
+ efmt = '"{0}" uses ftrace, and it is not properly supported'
+ if sysvals.useftrace:
+ res = 'YES'
+ elif sysvals.usecallgraph:
+ status = efmt.format('-f')
+ elif sysvals.usedevsrc:
+ status = efmt.format('-dev')
+ elif sysvals.useprocmon:
+ status = efmt.format('-proc')
+ pprint(' is ftrace supported: %s' % res)
# check if kprobes are available
if sysvals.usekprobes:
@@ -5857,8 +6020,8 @@ def statusCheck(probecheck=False):
pprint(' are kprobes supported: %s' % res)
# what data source are we using
- res = 'DMESG'
- if(ftgood):
+ res = 'DMESG (very limited, ftrace is preferred)'
+ if sysvals.useftrace:
sysvals.usetraceevents = True
for e in sysvals.traceevents:
if not os.path.exists(sysvals.epath+e):
@@ -5879,7 +6042,7 @@ def statusCheck(probecheck=False):
pprint(' optional commands this tool may use for info:')
no = sysvals.colorText('MISSING')
yes = sysvals.colorText('FOUND', 32)
- for c in ['turbostat', 'mcelog', 'lspci', 'lsusb']:
+ for c in ['turbostat', 'mcelog', 'lspci', 'lsusb', 'netfix']:
if c == 'turbostat':
res = yes if sysvals.haveTurbostat() else no
else:
@@ -5971,7 +6134,7 @@ def processData(live=False, quiet=False):
if not sysvals.stamp:
pprint('ERROR: data does not include the expected stamp')
return (testruns, {'error': 'timeline generation failed'})
- shown = ['bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr',
+ shown = ['os', 'bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr',
'memsz', 'mode', 'numcpu', 'plat', 'time', 'wifi']
sysvals.vprint('System Info:')
for key in sorted(sysvals.stamp):
@@ -6052,6 +6215,8 @@ def runTest(n=0, quiet=False):
if sysvals.display:
ret = sysvals.displayControl('init')
sysvals.dlog('xset display init, ret = %d' % ret)
+ sysvals.testVal(sysvals.pmdpath, 'basic', '1')
+ sysvals.testVal(sysvals.s0ixpath, 'basic', 'Y')
sysvals.dlog('initialize ftrace')
sysvals.initFtrace(quiet)
@@ -6145,9 +6310,12 @@ def data_from_html(file, outpath, issues, fulldetail=False):
elist[err[0]] += 1
for i in elist:
ilist.append('%sx%d' % (i, elist[i]) if elist[i] > 1 else i)
- wifi = find_in_html(html, 'Wifi Resume: ', '</td>')
- if wifi:
- extra['wifi'] = wifi
+ line = find_in_html(log, '# wifi ', '\n')
+ if line:
+ extra['wifi'] = line
+ line = find_in_html(log, '# netfix ', '\n')
+ if line:
+ extra['netfix'] = line
low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
for lowstr in ['waking', '+']:
if not low:
@@ -6243,7 +6411,7 @@ def genHtml(subdir, force=False):
sysvals.ftracefile = file
sysvals.setOutputFile()
if (sysvals.dmesgfile or sysvals.ftracefile) and sysvals.htmlfile and \
- (force or not sysvals.usable(sysvals.htmlfile)):
+ (force or not sysvals.usable(sysvals.htmlfile, True)):
pprint('FTRACE: %s' % sysvals.ftracefile)
if sysvals.dmesgfile:
pprint('DMESG : %s' % sysvals.dmesgfile)
@@ -6533,6 +6701,7 @@ def printHelp():
' -skiphtml Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
' -result fn Export a results table to a text file for parsing.\n'\
' -wifi If a wifi connection is available, check that it reconnects after resume.\n'\
+ ' -netfix Use netfix to reset the network in the event it fails to resume.\n'\
' [testprep]\n'\
' -sync Sync the filesystems before starting the test\n'\
' -rs on/off Enable/disable runtime suspend for all devices, restore all after test\n'\
@@ -6615,6 +6784,8 @@ if __name__ == '__main__':
elif(arg == '-v'):
pprint("Version %s" % sysvals.version)
sys.exit(0)
+ elif(arg == '-debugtiming'):
+ debugtiming = True
elif(arg == '-x2'):
sysvals.execcount = 2
elif(arg == '-x2delay'):
@@ -6657,6 +6828,8 @@ if __name__ == '__main__':
sysvals.sync = True
elif(arg == '-wifi'):
sysvals.wifi = True
+ elif(arg == '-netfix'):
+ sysvals.netfix = True
elif(arg == '-gzip'):
sysvals.gzip = True
elif(arg == '-info'):
@@ -6819,7 +6992,7 @@ if __name__ == '__main__':
sysvals.outdir = val
sysvals.notestrun = True
if(os.path.isdir(val) == False):
- doError('%s is not accesible' % val)
+ doError('%s is not accessible' % val)
elif(arg == '-filter'):
try:
val = next(args)
@@ -6942,12 +7115,11 @@ if __name__ == '__main__':
time.sleep(sysvals.multitest['delay'])
fmt = 'suspend-%y%m%d-%H%M%S'
sysvals.testdir = os.path.join(sysvals.outdir, datetime.now().strftime(fmt))
- ret = runTest(i+1, True)
+ ret = runTest(i+1, not sysvals.verbose)
failcnt = 0 if not ret else failcnt + 1
if sysvals.maxfail > 0 and failcnt >= sysvals.maxfail:
pprint('Maximum fail count of %d reached, aborting multitest' % (sysvals.maxfail))
break
- time.sleep(5)
sysvals.resetlog()
sysvals.multistat(False, i, finish)
if 'time' in sysvals.multitest and datetime.now() >= finish:
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index f3e3c94ab9bd..92e139b9c792 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -9,7 +9,7 @@ ifeq ("$(origin O)", "command line")
endif
turbostat : turbostat.c
-override CFLAGS += -O2 -Wall -I../../../include
+override CFLAGS += -O2 -Wall -Wextra -I../../../include
override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
override CFLAGS += -D_FILE_OFFSET_BITS=64
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 9b17097bc3d7..c7b26a3603af 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -92,40 +92,66 @@ displays the statistics gathered since it was forked.
.SH ROW DESCRIPTIONS
The system configuration dump (if --quiet is not used) is followed by statistics. The first row of the statistics labels the content of each column (below). The second row of statistics is the system summary line. The system summary line has a '-' in the columns for the Package, Core, and CPU. The contents of the system summary line depend on the type of column. Columns that count items (e.g. IRQ) show the sum across all CPUs in the system. Columns that show a percentage show the average across all CPUs in the system. Columns that dump raw MSR values simply show 0 in the summary. After the system summary row, each row describes a specific Package/Core/CPU. Note that if the --cpu parameter is used to limit which specific CPUs are displayed, turbostat will still collect statistics for all CPUs in the system and will still show the system summary for all CPUs in the system.
.SH COLUMN DESCRIPTIONS
-.nf
+.PP
\fBusec\fP For each CPU, the number of microseconds elapsed during counter collection, including thread migration -- if any. This counter is disabled by default, and is enabled with "--enable usec", or --debug. On the summary row, usec refers to the total elapsed time to collect the counters on all cpus.
+.PP
\fBTime_Of_Day_Seconds\fP For each CPU, the gettimeofday(2) value (seconds.subsec since Epoch) when the counters ending the measurement interval were collected. This column is disabled by default, and can be enabled with "--enable Time_Of_Day_Seconds" or "--debug". On the summary row, Time_Of_Day_Seconds refers to the timestamp following collection of counters on the last CPU.
+.PP
\fBCore\fP processor core number. Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology (HT).
+.PP
\fBCPU\fP Linux CPU (logical processor) number. Yes, it is okay that on many systems the CPUs are not listed in numerical order -- for efficiency reasons, turbostat runs in topology order, so HT siblings appear together.
+.PP
\fBPackage\fP processor package number -- not present on systems with a single processor package.
+.PP
\fBAvg_MHz\fP number of cycles executed divided by time elapsed. Note that this includes idle-time when 0 instructions are executed.
+.PP
\fBBusy%\fP percent of the measurement interval that the CPU executes instructions, aka. % of time in "C0" state.
+.PP
\fBBzy_MHz\fP average clock rate while the CPU was not idle (ie. in "c0" state).
+.PP
\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval.
+.PP
\fBIRQ\fP The number of interrupts serviced by that CPU during the measurement interval. The system total line is the sum of interrupts serviced across all CPUs. turbostat parses /proc/interrupts to generate this summary.
+.PP
\fBSMI\fP The number of System Management Interrupts serviced by that CPU during the measurement interval. While this counter is actually per-CPU, SMI are triggered on all processors, so the number should be the same for all CPUs.
+.PP
\fBC1, C2, C3...\fP The number of times Linux requested the C1, C2, C3 idle state during the measurement interval. The system summary line shows the sum for all CPUs. These are C-state names as exported in /sys/devices/system/cpu/cpu*/cpuidle/state*/name. While their names are generic, their attributes are processor specific. The system description section of the output shows which MWAIT sub-states they are mapped to on each system.
+.PP
\fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3, etc. The system summary is the average of all CPUs in the system. Note that these are software counters, reflecting what was requested; the hardware counters reflect what was actually achieved.
+.PP
\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters.
+.PP
\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
+.PP
\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+.PP
\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
+.PP
\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
+.PP
\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
+.PP
\fBPkgWatt\fP Watts consumed by the whole package.
+.PP
\fBCorWatt\fP Watts consumed by the core part of the package.
+.PP
\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors.
+.PP
\fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
+.PP
\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system. Note that the meaning of this field is model specific. For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits. Comparing PkgWatt and PkgTmp to system limits is necessary.
+.PP
\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
-.fi
+.PP
+\fBUncMHz\fP uncore MHz, instantaneous sample.
.SH TOO MUCH INFORMATION EXAMPLE
By default, turbostat dumps all possible information -- a system configuration header, followed by columns for all counters.
This is ideal for remote debugging: use the "--out" option to save everything to a text file, and get that file to the expert helping you debug.
.PP
When you are not interested in all that information, there are several ways to see only what you want. First, the "--quiet" option will skip the configuration information, and turbostat will show only the counter columns. Second, you can reduce the columns with the "--hide" and "--show" options. If you use the "--show" option, then turbostat will show only the columns you list. If you use the "--hide" option, turbostat will show all columns, except the ones you list.
.PP
-To find out what columns are available for --show and --hide, the "--list" option is available. For convenience, the special strings "sysfs" can be used to refer to all of the sysfs C-state counters at once:
+To find out what columns are available for --show and --hide, use the "--list" option. The CATEGORY names above may be used to refer to groups of counters. For convenience, the special string "sysfs" can be used to refer to all of the sysfs C-state counters at once:
+.PP
.nf
sudo ./turbostat --show sysfs --quiet sleep 10
10.003837 sec
Without a command to fork, turbostat displays statistics every 5 seconds.
Periodic output goes to stdout, by default, unless --out is used to specify an output file.
The 5-second interval can be changed with the "-i sec" option.
.nf
-sudo ./turbostat --quiet --hide sysfs,IRQ,SMI,CoreTmp,PkgTmp,GFX%rc6,GFXMHz,PkgWatt,CorWatt,GFXWatt
- Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c1 CPU%c3 CPU%c6 CPU%c7
- - - 488 12.52 3900 3498 12.50 0.00 0.00 74.98
- 0 0 5 0.13 3900 3498 99.87 0.00 0.00 0.00
- 0 4 3897 99.99 3900 3498 0.01
- 1 1 0 0.00 3856 3498 0.01 0.00 0.00 99.98
- 1 5 0 0.00 3861 3498 0.01
- 2 2 1 0.02 3889 3498 0.03 0.00 0.00 99.95
- 2 6 0 0.00 3863 3498 0.05
- 3 3 0 0.01 3869 3498 0.02 0.00 0.00 99.97
- 3 7 0 0.00 3878 3498 0.03
- Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c1 CPU%c3 CPU%c6 CPU%c7
- - - 491 12.59 3900 3498 12.42 0.00 0.00 74.99
- 0 0 27 0.69 3900 3498 99.31 0.00 0.00 0.00
- 0 4 3898 99.99 3900 3498 0.01
- 1 1 0 0.00 3883 3498 0.01 0.00 0.00 99.99
- 1 5 0 0.00 3898 3498 0.01
- 2 2 0 0.01 3889 3498 0.02 0.00 0.00 99.98
- 2 6 0 0.00 3889 3498 0.02
- 3 3 0 0.00 3856 3498 0.01 0.00 0.00 99.99
- 3 7 0 0.00 3897 3498 0.01
+sudo turbostat --quiet --show CPU,frequency
+ Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c7 UncMhz
+ - - 524 12.48 4198 3096 74.53 3800
+ 0 0 4 0.09 4081 3096 98.88 3800
+ 0 4 1 0.02 4063 3096
+ 1 1 2 0.06 4063 3096 99.60
+ 1 5 2 0.05 4070 3096
+ 2 2 4178 99.52 4199 3096 0.00
+ 2 6 3 0.08 4159 3096
+ 3 3 1 0.04 4046 3096 99.66
+ 3 7 0 0.01 3989 3096
+ Core CPU Avg_MHz Busy% Bzy_MHz TSC_MHz CPU%c7 UncMhz
+ - - 525 12.52 4198 3096 74.54 3800
+ 0 0 4 0.10 4051 3096 99.49 3800
+ 0 4 2 0.04 3993 3096
+ 1 1 3 0.07 4054 3096 99.56
+ 1 5 4 0.10 4018 3096
+ 2 2 4178 99.51 4199 3096 0.00
+ 2 6 4 0.09 4143 3096
+ 3 3 2 0.06 4026 3096 99.10
+ 3 7 7 0.17 4074 3096
.fi
-This example also shows the use of the --hide option to skip columns that are not wanted.
-Note that cpu4 in this example is 99.99% busy, while the other CPUs are all under 1% busy.
-Notice that cpu4's HT sibling is cpu0, which is under 1% busy, but can get into CPU%c1 only,
-because its cpu4's activity on shared hardware keeps it from entering a deeper C-state.
+This example also shows the use of the --show option to show only the desired columns.
.SH SYSTEM CONFIGURATION INFORMATION EXAMPLE
@@ -191,61 +214,86 @@ By default, turbostat always dumps system configuration information
before taking measurements. In the example above, "--quiet" is used
to suppress that output. Here is an example of the configuration information:
.nf
-turbostat version 2017.02.15 - Len Brown <lenb@kernel.org>
-CPUID(0): GenuineIntel 13 CPUID levels; family:model:stepping 0x6:3c:3 (6:60:3)
-CPUID(1): SSE3 MONITOR - EIST TM2 TSC MSR ACPI-TM TM
-CPUID(6): APERF, TURBO, DTS, PTM, No-HWP, No-HWPnotify, No-HWPwindow, No-HWPepp, No-HWPpkg, EPB
-cpu4: MSR_IA32_MISC_ENABLE: 0x00850089 (TCC EIST No-MWAIT PREFETCH TURBO)
-CPUID(7): No-SGX
-cpu4: MSR_MISC_PWR_MGMT: 0x00400000 (ENable-EIST_Coordination DISable-EPB DISable-OOB)
-RAPL: 3121 sec. Joule Counter Range, at 84 Watts
-cpu4: MSR_PLATFORM_INFO: 0x80838f3012300
+turbostat version 2022.04.16 - Len Brown <lenb@kernel.org>
+Kernel command line: BOOT_IMAGE=/boot/vmlinuz-5.18.0-rc6-00001-ge6891250e3b5 ...
+CPUID(0): GenuineIntel 0x16 CPUID levels
+CPUID(1): family:model:stepping 0x6:9e:9 (6:158:9) microcode 0xea
+CPUID(0x80000000): max_extended_levels: 0x80000008
+CPUID(1): SSE3 MONITOR - EIST TM2 TSC MSR ACPI-TM HT TM
+CPUID(6): APERF, TURBO, DTS, PTM, HWP, HWPnotify, HWPwindow, HWPepp, No-HWPpkg, EPB
+cpu7: MSR_IA32_MISC_ENABLE: 0x00850089 (TCC EIST MWAIT PREFETCH TURBO)
+CPUID(7): SGX
+cpu7: MSR_IA32_FEATURE_CONTROL: 0x00000005 (Locked )
+CPUID(0x15): eax_crystal: 2 ebx_tsc: 258 ecx_crystal_hz: 0
+TSC: 3096 MHz (24000000 Hz * 258 / 2 / 1000000)
+CPUID(0x16): base_mhz: 3100 max_mhz: 4200 bus_mhz: 100
+cpu7: MSR_MISC_PWR_MGMT: 0x00401cc0 (ENable-EIST_Coordination DISable-EPB DISable-OOB)
+RAPL: 5825 sec. Joule Counter Range, at 45 Watts
+cpu7: MSR_PLATFORM_INFO: 0x80839f1011f00
8 * 100.0 = 800.0 MHz max efficiency frequency
-35 * 100.0 = 3500.0 MHz base frequency
-cpu4: MSR_IA32_POWER_CTL: 0x0004005d (C1E auto-promotion: DISabled)
-cpu4: MSR_TURBO_RATIO_LIMIT: 0x25262727
-37 * 100.0 = 3700.0 MHz max turbo 4 active cores
-38 * 100.0 = 3800.0 MHz max turbo 3 active cores
-39 * 100.0 = 3900.0 MHz max turbo 2 active cores
-39 * 100.0 = 3900.0 MHz max turbo 1 active cores
-cpu4: MSR_CONFIG_TDP_NOMINAL: 0x00000023 (base_ratio=35)
-cpu4: MSR_CONFIG_TDP_LEVEL_1: 0x00000000 ()
-cpu4: MSR_CONFIG_TDP_LEVEL_2: 0x00000000 ()
-cpu4: MSR_CONFIG_TDP_CONTROL: 0x80000000 ( lock=1)
-cpu4: MSR_TURBO_ACTIVATION_RATIO: 0x00000000 (MAX_NON_TURBO_RATIO=0 lock=0)
-cpu4: MSR_PKG_CST_CONFIG_CONTROL: 0x1e000400 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, UNlocked: pkg-cstate-limit=0: pc0)
-cpu4: POLL: CPUIDLE CORE POLL IDLE
-cpu4: C1: MWAIT 0x00
-cpu4: C1E: MWAIT 0x01
-cpu4: C3: MWAIT 0x10
-cpu4: C6: MWAIT 0x20
-cpu4: C7s: MWAIT 0x32
-cpu4: MSR_MISC_FEATURE_CONTROL: 0x00000000 (L2-Prefetch L2-Prefetch-pair L1-Prefetch L1-IP-Prefetch)
-cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x00000006 (balanced)
-cpu0: MSR_CORE_PERF_LIMIT_REASONS, 0x31200000 (Active: ) (Logged: Transitions, MultiCoreTurbo, Amps, Auto-HWP, )
-cpu0: MSR_GFX_PERF_LIMIT_REASONS, 0x00000000 (Active: ) (Logged: )
-cpu0: MSR_RING_PERF_LIMIT_REASONS, 0x0d000000 (Active: ) (Logged: Amps, PkgPwrL1, PkgPwrL2, )
+31 * 100.0 = 3100.0 MHz base frequency
+cpu7: MSR_IA32_POWER_CTL: 0x002c005d (C1E auto-promotion: DISabled)
+cpu7: MSR_TURBO_RATIO_LIMIT: 0x2728292a
+39 * 100.0 = 3900.0 MHz max turbo 4 active cores
+40 * 100.0 = 4000.0 MHz max turbo 3 active cores
+41 * 100.0 = 4100.0 MHz max turbo 2 active cores
+42 * 100.0 = 4200.0 MHz max turbo 1 active cores
+cpu7: MSR_CONFIG_TDP_NOMINAL: 0x0000001f (base_ratio=31)
+cpu7: MSR_CONFIG_TDP_LEVEL_1: 0x00000000 ()
+cpu7: MSR_CONFIG_TDP_LEVEL_2: 0x00000000 ()
+cpu7: MSR_CONFIG_TDP_CONTROL: 0x80000000 ( lock=1)
+cpu7: MSR_TURBO_ACTIVATION_RATIO: 0x00000000 (MAX_NON_TURBO_RATIO=0 lock=0)
+cpu7: MSR_PKG_CST_CONFIG_CONTROL: 0x1e008008 (UNdemote-C3, UNdemote-C1, demote-C3, demote-C1, locked, pkg-cstate-limit=8 (unlimited))
+Uncore Frequency pkg0 die0: 800 - 3900 MHz (800 - 3900 MHz)
+/dev/cpu_dma_latency: 2000000000 usec (default)
+current_driver: intel_idle
+current_governor: menu
+current_governor_ro: menu
+cpu7: POLL: CPUIDLE CORE POLL IDLE
+cpu7: C1: MWAIT 0x00
+cpu7: C1E: MWAIT 0x01
+cpu7: C3: MWAIT 0x10
+cpu7: C6: MWAIT 0x20
+cpu7: C7s: MWAIT 0x33
+cpu7: C8: MWAIT 0x40
+cpu7: C9: MWAIT 0x50
+cpu7: C10: MWAIT 0x60
+cpu7: cpufreq driver: intel_pstate
+cpu7: cpufreq governor: performance
+cpufreq intel_pstate no_turbo: 0
+cpu7: MSR_MISC_FEATURE_CONTROL: 0x00000000 (L2-Prefetch L2-Prefetch-pair L1-Prefetch L1-IP-Prefetch)
+cpu0: MSR_PM_ENABLE: 0x00000001 (HWP)
+cpu0: MSR_HWP_CAPABILITIES: 0x01101f53 (high 83 guar 31 eff 16 low 1)
+cpu0: MSR_HWP_REQUEST: 0x00005353 (min 83 max 83 des 0 epp 0x0 window 0x0 pkg 0x0)
+cpu0: MSR_HWP_INTERRUPT: 0x00000001 (EN_Guaranteed_Perf_Change, Dis_Excursion_Min)
+cpu0: MSR_HWP_STATUS: 0x00000004 (No-Guaranteed_Perf_Change, No-Excursion_Min)
+cpu0: EPB: 6 (balanced)
cpu0: MSR_RAPL_POWER_UNIT: 0x000a0e03 (0.125000 Watts, 0.000061 Joules, 0.000977 sec.)
-cpu0: MSR_PKG_POWER_INFO: 0x000002a0 (84 W TDP, RAPL 0 - 0 W, 0.000000 sec.)
-cpu0: MSR_PKG_POWER_LIMIT: 0x428348001a82a0 (UNlocked)
-cpu0: PKG Limit #1: ENabled (84.000000 Watts, 8.000000 sec, clamp DISabled)
-cpu0: PKG Limit #2: ENabled (105.000000 Watts, 0.002441* sec, clamp DISabled)
+cpu0: MSR_PKG_POWER_INFO: 0x00000168 (45 W TDP, RAPL 0 - 0 W, 0.000000 sec.)
+cpu0: MSR_PKG_POWER_LIMIT: 0x42820800218208 (UNlocked)
+cpu0: PKG Limit #1: ENabled (65.000 Watts, 64.000000 sec, clamp ENabled)
+cpu0: PKG Limit #2: ENabled (65.000 Watts, 0.002441* sec, clamp DISabled)
+cpu0: MSR_VR_CURRENT_CONFIG: 0x00000000
+cpu0: PKG Limit #4: 0.000000 Watts (UNlocked)
+cpu0: MSR_DRAM_POWER_LIMIT: 0x5400de00000000 (UNlocked)
+cpu0: DRAM Limit: DISabled (0.000 Watts, 0.000977 sec, clamp DISabled)
cpu0: MSR_PP0_POLICY: 0
cpu0: MSR_PP0_POWER_LIMIT: 0x00000000 (UNlocked)
-cpu0: Cores Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
+cpu0: Cores Limit: DISabled (0.000 Watts, 0.000977 sec, clamp DISabled)
cpu0: MSR_PP1_POLICY: 0
cpu0: MSR_PP1_POWER_LIMIT: 0x00000000 (UNlocked)
-cpu0: GFX Limit: DISabled (0.000000 Watts, 0.000977 sec, clamp DISabled)
-cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00641400 (100 C)
-cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x884c0800 (24 C)
-cpu0: MSR_IA32_THERM_STATUS: 0x884c0000 (24 C +/- 1)
-cpu1: MSR_IA32_THERM_STATUS: 0x88510000 (19 C +/- 1)
-cpu2: MSR_IA32_THERM_STATUS: 0x884e0000 (22 C +/- 1)
-cpu3: MSR_IA32_THERM_STATUS: 0x88510000 (19 C +/- 1)
-cpu4: MSR_PKGC3_IRTL: 0x00008842 (valid, 67584 ns)
-cpu4: MSR_PKGC6_IRTL: 0x00008873 (valid, 117760 ns)
-cpu4: MSR_PKGC7_IRTL: 0x00008891 (valid, 148480 ns)
+cpu0: GFX Limit: DISabled (0.000 Watts, 0.000977 sec, clamp DISabled)
+cpu0: MSR_IA32_TEMPERATURE_TARGET: 0x00640000 (100 C) (100 default - 0 offset)
+cpu0: MSR_IA32_PACKAGE_THERM_STATUS: 0x88200800 (68 C)
+cpu0: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x00000003 (100 C, 100 C)
+cpu7: MSR_PKGC3_IRTL: 0x0000884e (valid, 79872 ns)
+cpu7: MSR_PKGC6_IRTL: 0x00008876 (valid, 120832 ns)
+cpu7: MSR_PKGC7_IRTL: 0x00008894 (valid, 151552 ns)
+cpu7: MSR_PKGC8_IRTL: 0x000088fa (valid, 256000 ns)
+cpu7: MSR_PKGC9_IRTL: 0x0000894c (valid, 339968 ns)
+cpu7: MSR_PKGC10_IRTL: 0x00008bf2 (valid, 1034240 ns)
.fi
+.PP
The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
available at the minimum package voltage. The \fBTSC frequency\fP is the base
frequency of the processor -- this should match the brand string
@@ -292,7 +340,7 @@ starts a new interval.
must be run as root.
Alternatively, non-root users can be enabled to run turbostat this way:
-# setcap cap_sys_rawio=ep ./turbostat
+# setcap cap_sys_admin,cap_sys_rawio,cap_sys_nice=+ep ./turbostat
# chmod +r /dev/cpu/*/msr
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index bc5ae0872fed..831dc32d45fa 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -3,7 +3,7 @@
* turbostat -- show CPU frequency and C-state residency
* on modern Intel and AMD processors.
*
- * Copyright (c) 2021 Intel Corporation.
+ * Copyright (c) 2022 Intel Corporation.
* Len Brown <len.brown@intel.com>
*/
@@ -37,6 +37,173 @@
#include <asm/unistd.h>
#include <stdbool.h>
+#define UNUSED(x) (void)(x)
+
+/*
+ * This list matches the column headers, except
+ * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time
+ * 2. Core and CPU are moved to the end, we can't have strings that contain them
+ * matching on them for --show and --hide.
+ */
+
+/*
+ * buffer size used by sscanf() for added column names
+ * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters
+ */
+#define NAME_BYTES 20
+#define PATH_BYTES 128
+
+enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
+enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC };
+enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT };
+
+struct msr_counter {
+ unsigned int msr_num;
+ char name[NAME_BYTES];
+ char path[PATH_BYTES];
+ unsigned int width;
+ enum counter_type type;
+ enum counter_format format;
+ struct msr_counter *next;
+ unsigned int flags;
+#define FLAGS_HIDE (1 << 0)
+#define FLAGS_SHOW (1 << 1)
+#define SYSFS_PERCPU (1 << 1)
+};
+
+struct msr_counter bic[] = {
+ { 0x0, "usec", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Time_Of_Day_Seconds", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Package", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Node", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Avg_MHz", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Busy%", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Bzy_MHz", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "TSC_MHz", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "IRQ", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL, 0 },
+ { 0x0, "sysfs", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CPU%c1", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CPU%c3", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CPU%c6", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CPU%c7", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "ThreadC", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CoreTmp", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CoreCnt", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "PkgTmp", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "GFX%rc6", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "GFXMHz", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pkg%pc2", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pkg%pc3", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pkg%pc6", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pkg%pc7", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pkg%pc8", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pkg%pc9", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pk%pc10", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CPU%LPI", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "SYS%LPI", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "PkgWatt", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CorWatt", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "GFXWatt", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "PkgCnt", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "RAMWatt", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "PKG_%", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "RAM_%", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Pkg_J", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Cor_J", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "GFX_J", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "RAM_J", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Mod%c6", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Totl%C0", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Any%C0", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "GFX%C0", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CPUGFX%", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Core", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CPU", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "APIC", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "X2APIC", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "Die", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "GFXAMHz", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "IPC", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "CoreThr", "", 0, 0, 0, NULL, 0 },
+ { 0x0, "UncMHz", "", 0, 0, 0, NULL, 0 },
+};
+
+#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
+#define BIC_USEC (1ULL << 0)
+#define BIC_TOD (1ULL << 1)
+#define BIC_Package (1ULL << 2)
+#define BIC_Node (1ULL << 3)
+#define BIC_Avg_MHz (1ULL << 4)
+#define BIC_Busy (1ULL << 5)
+#define BIC_Bzy_MHz (1ULL << 6)
+#define BIC_TSC_MHz (1ULL << 7)
+#define BIC_IRQ (1ULL << 8)
+#define BIC_SMI (1ULL << 9)
+#define BIC_sysfs (1ULL << 10)
+#define BIC_CPU_c1 (1ULL << 11)
+#define BIC_CPU_c3 (1ULL << 12)
+#define BIC_CPU_c6 (1ULL << 13)
+#define BIC_CPU_c7 (1ULL << 14)
+#define BIC_ThreadC (1ULL << 15)
+#define BIC_CoreTmp (1ULL << 16)
+#define BIC_CoreCnt (1ULL << 17)
+#define BIC_PkgTmp (1ULL << 18)
+#define BIC_GFX_rc6 (1ULL << 19)
+#define BIC_GFXMHz (1ULL << 20)
+#define BIC_Pkgpc2 (1ULL << 21)
+#define BIC_Pkgpc3 (1ULL << 22)
+#define BIC_Pkgpc6 (1ULL << 23)
+#define BIC_Pkgpc7 (1ULL << 24)
+#define BIC_Pkgpc8 (1ULL << 25)
+#define BIC_Pkgpc9 (1ULL << 26)
+#define BIC_Pkgpc10 (1ULL << 27)
+#define BIC_CPU_LPI (1ULL << 28)
+#define BIC_SYS_LPI (1ULL << 29)
+#define BIC_PkgWatt (1ULL << 30)
+#define BIC_CorWatt (1ULL << 31)
+#define BIC_GFXWatt (1ULL << 32)
+#define BIC_PkgCnt (1ULL << 33)
+#define BIC_RAMWatt (1ULL << 34)
+#define BIC_PKG__ (1ULL << 35)
+#define BIC_RAM__ (1ULL << 36)
+#define BIC_Pkg_J (1ULL << 37)
+#define BIC_Cor_J (1ULL << 38)
+#define BIC_GFX_J (1ULL << 39)
+#define BIC_RAM_J (1ULL << 40)
+#define BIC_Mod_c6 (1ULL << 41)
+#define BIC_Totl_c0 (1ULL << 42)
+#define BIC_Any_c0 (1ULL << 43)
+#define BIC_GFX_c0 (1ULL << 44)
+#define BIC_CPUGFX (1ULL << 45)
+#define BIC_Core (1ULL << 46)
+#define BIC_CPU (1ULL << 47)
+#define BIC_APIC (1ULL << 48)
+#define BIC_X2APIC (1ULL << 49)
+#define BIC_Die (1ULL << 50)
+#define BIC_GFXACTMHz (1ULL << 51)
+#define BIC_IPC (1ULL << 52)
+#define BIC_CORE_THROT_CNT (1ULL << 53)
+#define BIC_UNCORE_MHZ (1ULL << 54)
+
+#define BIC_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die )
+#define BIC_THERMAL_PWR ( BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__)
+#define BIC_FREQUENCY ( BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz | BIC_UNCORE_MHZ)
+#define BIC_IDLE ( BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX)
+#define BIC_OTHER ( BIC_IRQ | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC)
+
+#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
+
+unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
+unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
+
+#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
+#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
+#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
+#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
+#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
+#define BIC_IS_ENABLED(COUNTER_BIT) (bic_enabled & COUNTER_BIT)
+
char *proc_stat = "/proc/stat";
FILE *outf;
int *fd_percpu;
@@ -48,6 +215,7 @@ struct timespec interval_ts = { 5, 0 };
unsigned int model_orig;
unsigned int num_iterations;
+unsigned int header_iterations;
unsigned int debug;
unsigned int quiet;
unsigned int shown;
@@ -62,6 +230,7 @@ unsigned int do_slm_cstates;
unsigned int use_c1_residency_msr;
unsigned int has_aperf;
unsigned int has_epb;
+unsigned int is_hybrid;
unsigned int do_irtl_snb;
unsigned int do_irtl_hsw;
unsigned int units = 1000000; /* MHz etc */
@@ -159,13 +328,6 @@ int ignore_stdin;
#define MAX(a, b) ((a) > (b) ? (a) : (b))
-/*
- * buffer size used by sscanf() for added column names
- * Usually truncated to 7 characters, but also handles 18 columns for raw 64-bit counters
- */
-#define NAME_BYTES 20
-#define PATH_BYTES 128
-
int backwards_count;
char *progname;
@@ -205,6 +367,7 @@ struct core_data {
unsigned int core_temp_c;
unsigned int core_energy; /* MSR_CORE_ENERGY_STAT */
unsigned int core_id;
+ unsigned long long core_throt_cnt;
unsigned long long counter[MAX_ADDED_COUNTERS];
} *core_even, *core_odd;
@@ -233,6 +396,7 @@ struct pkg_data {
unsigned long long rapl_pkg_perf_status; /* MSR_PKG_PERF_STATUS */
unsigned long long rapl_dram_perf_status; /* MSR_DRAM_PERF_STATUS */
unsigned int pkg_temp_c;
+ unsigned int uncore_mhz;
unsigned long long counter[MAX_ADDED_COUNTERS];
} *package_even, *package_odd;
@@ -255,24 +419,6 @@ struct pkg_data {
#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
-enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE };
-enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC };
-enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT };
-
-struct msr_counter {
- unsigned int msr_num;
- char name[NAME_BYTES];
- char path[PATH_BYTES];
- unsigned int width;
- enum counter_type type;
- enum counter_format format;
- struct msr_counter *next;
- unsigned int flags;
-#define FLAGS_HIDE (1 << 0)
-#define FLAGS_SHOW (1 << 1)
-#define SYSFS_PERCPU (1 << 1)
-};
-
/*
* The accumulated sum of MSR is defined as a monotonic
* increasing MSR, it will be accumulated periodically,
@@ -522,8 +668,10 @@ static int perf_instr_count_open(int cpu_num)
/* counter for cpu_num, including user + kernel and all processes */
fd = perf_event_open(&pea, -1, cpu_num, -1, 0);
- if (fd == -1)
- err(-1, "cpu%d: perf instruction counter\n", cpu_num);
+ if (fd == -1) {
+ warn("cpu%d: perf instruction counter", cpu_num);
+ BIC_NOT_PRESENT(BIC_IPC);
+ }
return fd;
}
@@ -550,143 +698,10 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
return 0;
}
-/*
- * This list matches the column headers, except
- * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time
- * 2. Core and CPU are moved to the end, we can't have strings that contain them
- * matching on them for --show and --hide.
- */
-struct msr_counter bic[] = {
- { 0x0, "usec" },
- { 0x0, "Time_Of_Day_Seconds" },
- { 0x0, "Package" },
- { 0x0, "Node" },
- { 0x0, "Avg_MHz" },
- { 0x0, "Busy%" },
- { 0x0, "Bzy_MHz" },
- { 0x0, "TSC_MHz" },
- { 0x0, "IRQ" },
- { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL },
- { 0x0, "sysfs" },
- { 0x0, "CPU%c1" },
- { 0x0, "CPU%c3" },
- { 0x0, "CPU%c6" },
- { 0x0, "CPU%c7" },
- { 0x0, "ThreadC" },
- { 0x0, "CoreTmp" },
- { 0x0, "CoreCnt" },
- { 0x0, "PkgTmp" },
- { 0x0, "GFX%rc6" },
- { 0x0, "GFXMHz" },
- { 0x0, "Pkg%pc2" },
- { 0x0, "Pkg%pc3" },
- { 0x0, "Pkg%pc6" },
- { 0x0, "Pkg%pc7" },
- { 0x0, "Pkg%pc8" },
- { 0x0, "Pkg%pc9" },
- { 0x0, "Pk%pc10" },
- { 0x0, "CPU%LPI" },
- { 0x0, "SYS%LPI" },
- { 0x0, "PkgWatt" },
- { 0x0, "CorWatt" },
- { 0x0, "GFXWatt" },
- { 0x0, "PkgCnt" },
- { 0x0, "RAMWatt" },
- { 0x0, "PKG_%" },
- { 0x0, "RAM_%" },
- { 0x0, "Pkg_J" },
- { 0x0, "Cor_J" },
- { 0x0, "GFX_J" },
- { 0x0, "RAM_J" },
- { 0x0, "Mod%c6" },
- { 0x0, "Totl%C0" },
- { 0x0, "Any%C0" },
- { 0x0, "GFX%C0" },
- { 0x0, "CPUGFX%" },
- { 0x0, "Core" },
- { 0x0, "CPU" },
- { 0x0, "APIC" },
- { 0x0, "X2APIC" },
- { 0x0, "Die" },
- { 0x0, "GFXAMHz" },
- { 0x0, "IPC" },
-};
-
-#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
-#define BIC_USEC (1ULL << 0)
-#define BIC_TOD (1ULL << 1)
-#define BIC_Package (1ULL << 2)
-#define BIC_Node (1ULL << 3)
-#define BIC_Avg_MHz (1ULL << 4)
-#define BIC_Busy (1ULL << 5)
-#define BIC_Bzy_MHz (1ULL << 6)
-#define BIC_TSC_MHz (1ULL << 7)
-#define BIC_IRQ (1ULL << 8)
-#define BIC_SMI (1ULL << 9)
-#define BIC_sysfs (1ULL << 10)
-#define BIC_CPU_c1 (1ULL << 11)
-#define BIC_CPU_c3 (1ULL << 12)
-#define BIC_CPU_c6 (1ULL << 13)
-#define BIC_CPU_c7 (1ULL << 14)
-#define BIC_ThreadC (1ULL << 15)
-#define BIC_CoreTmp (1ULL << 16)
-#define BIC_CoreCnt (1ULL << 17)
-#define BIC_PkgTmp (1ULL << 18)
-#define BIC_GFX_rc6 (1ULL << 19)
-#define BIC_GFXMHz (1ULL << 20)
-#define BIC_Pkgpc2 (1ULL << 21)
-#define BIC_Pkgpc3 (1ULL << 22)
-#define BIC_Pkgpc6 (1ULL << 23)
-#define BIC_Pkgpc7 (1ULL << 24)
-#define BIC_Pkgpc8 (1ULL << 25)
-#define BIC_Pkgpc9 (1ULL << 26)
-#define BIC_Pkgpc10 (1ULL << 27)
-#define BIC_CPU_LPI (1ULL << 28)
-#define BIC_SYS_LPI (1ULL << 29)
-#define BIC_PkgWatt (1ULL << 30)
-#define BIC_CorWatt (1ULL << 31)
-#define BIC_GFXWatt (1ULL << 32)
-#define BIC_PkgCnt (1ULL << 33)
-#define BIC_RAMWatt (1ULL << 34)
-#define BIC_PKG__ (1ULL << 35)
-#define BIC_RAM__ (1ULL << 36)
-#define BIC_Pkg_J (1ULL << 37)
-#define BIC_Cor_J (1ULL << 38)
-#define BIC_GFX_J (1ULL << 39)
-#define BIC_RAM_J (1ULL << 40)
-#define BIC_Mod_c6 (1ULL << 41)
-#define BIC_Totl_c0 (1ULL << 42)
-#define BIC_Any_c0 (1ULL << 43)
-#define BIC_GFX_c0 (1ULL << 44)
-#define BIC_CPUGFX (1ULL << 45)
-#define BIC_Core (1ULL << 46)
-#define BIC_CPU (1ULL << 47)
-#define BIC_APIC (1ULL << 48)
-#define BIC_X2APIC (1ULL << 49)
-#define BIC_Die (1ULL << 50)
-#define BIC_GFXACTMHz (1ULL << 51)
-#define BIC_IPC (1ULL << 52)
-
-#define BIC_TOPOLOGY (BIC_Package | BIC_Node | BIC_CoreCnt | BIC_PkgCnt | BIC_Core | BIC_CPU | BIC_Die )
-#define BIC_THERMAL_PWR ( BIC_CoreTmp | BIC_PkgTmp | BIC_PkgWatt | BIC_CorWatt | BIC_GFXWatt | BIC_RAMWatt | BIC_PKG__ | BIC_RAM__)
-#define BIC_FREQUENCY ( BIC_Avg_MHz | BIC_Busy | BIC_Bzy_MHz | BIC_TSC_MHz | BIC_GFXMHz | BIC_GFXACTMHz )
-#define BIC_IDLE ( BIC_sysfs | BIC_CPU_c1 | BIC_CPU_c3 | BIC_CPU_c6 | BIC_CPU_c7 | BIC_GFX_rc6 | BIC_Pkgpc2 | BIC_Pkgpc3 | BIC_Pkgpc6 | BIC_Pkgpc7 | BIC_Pkgpc8 | BIC_Pkgpc9 | BIC_Pkgpc10 | BIC_CPU_LPI | BIC_SYS_LPI | BIC_Mod_c6 | BIC_Totl_c0 | BIC_Any_c0 | BIC_GFX_c0 | BIC_CPUGFX)
-#define BIC_OTHER ( BIC_IRQ | BIC_SMI | BIC_ThreadC | BIC_CoreTmp | BIC_IPC)
-
-#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
-
-unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
-
-#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
-#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
-#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
-#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
-#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
-#define BIC_IS_ENABLED(COUNTER_BIT) (bic_enabled & COUNTER_BIT)
-
#define MAX_DEFERRED 16
+char *deferred_add_names[MAX_DEFERRED];
char *deferred_skip_names[MAX_DEFERRED];
+int deferred_add_index;
int deferred_skip_index;
/*
@@ -720,6 +735,8 @@ void help(void)
" -l, --list list column headers only\n"
" -n, --num_iterations num\n"
" number of the measurement iterations\n"
+ " -N, --header_iterations num\n"
+ " print header every num iterations\n"
" -o, --out file\n"
" create or truncate \"file\" for all output\n"
" -q, --quiet skip decoding system configuration header\n"
@@ -741,7 +758,7 @@ void help(void)
*/
unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
{
- int i;
+ unsigned int i;
unsigned long long retval = 0;
while (name_list) {
@@ -752,40 +769,51 @@ unsigned long long bic_lookup(char *name_list, enum show_hide_mode mode)
if (comma)
*comma = '\0';
- if (!strcmp(name_list, "all"))
- return ~0;
- if (!strcmp(name_list, "topology"))
- return BIC_TOPOLOGY;
- if (!strcmp(name_list, "power"))
- return BIC_THERMAL_PWR;
- if (!strcmp(name_list, "idle"))
- return BIC_IDLE;
- if (!strcmp(name_list, "frequency"))
- return BIC_FREQUENCY;
- if (!strcmp(name_list, "other"))
- return BIC_OTHER;
- if (!strcmp(name_list, "all"))
- return 0;
-
for (i = 0; i < MAX_BIC; ++i) {
if (!strcmp(name_list, bic[i].name)) {
retval |= (1ULL << i);
break;
}
+ if (!strcmp(name_list, "all")) {
+ retval |= ~0;
+ break;
+ } else if (!strcmp(name_list, "topology")) {
+ retval |= BIC_TOPOLOGY;
+ break;
+ } else if (!strcmp(name_list, "power")) {
+ retval |= BIC_THERMAL_PWR;
+ break;
+ } else if (!strcmp(name_list, "idle")) {
+ retval |= BIC_IDLE;
+ break;
+ } else if (!strcmp(name_list, "frequency")) {
+ retval |= BIC_FREQUENCY;
+ break;
+ } else if (!strcmp(name_list, "other")) {
+ retval |= BIC_OTHER;
+ break;
+ }
}
if (i == MAX_BIC) {
if (mode == SHOW_LIST) {
- fprintf(stderr, "Invalid counter name: %s\n", name_list);
- exit(-1);
- }
- deferred_skip_names[deferred_skip_index++] = name_list;
- if (debug)
- fprintf(stderr, "deferred \"%s\"\n", name_list);
- if (deferred_skip_index >= MAX_DEFERRED) {
- fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n",
- MAX_DEFERRED, name_list);
- help();
- exit(1);
+ deferred_add_names[deferred_add_index++] = name_list;
+ if (deferred_add_index >= MAX_DEFERRED) {
+ fprintf(stderr, "More than max %d un-recognized --add options '%s'\n",
+ MAX_DEFERRED, name_list);
+ help();
+ exit(1);
+ }
+ } else {
+ deferred_skip_names[deferred_skip_index++] = name_list;
+ if (debug)
+ fprintf(stderr, "deferred \"%s\"\n", name_list);
+ if (deferred_skip_index >= MAX_DEFERRED) {
+ fprintf(stderr, "More than max %d un-recognized --skip options '%s'\n",
+ MAX_DEFERRED, name_list);
+ help();
+ exit(1);
+ }
}
}
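The rewritten lookup above no longer returns on the first group keyword;
each recognized name ORs its mask into retval, so "--show topology,power"
now selects the union of both groups. A small sketch of that accumulation
over a comma-separated list, with hypothetical mask values:

    /* Sketch: accumulate group bitmasks from a comma-separated list. */
    #include <stdio.h>
    #include <string.h>

    #define MASK_TOPOLOGY 0x3ULL /* hypothetical */
    #define MASK_POWER    0xcULL /* hypothetical */

    static unsigned long long lookup(char *list)
    {
        unsigned long long ret = 0;
        char *name;

        for (name = strtok(list, ","); name; name = strtok(NULL, ",")) {
            if (!strcmp(name, "topology"))
                ret |= MASK_TOPOLOGY;
            else if (!strcmp(name, "power"))
                ret |= MASK_POWER;
        }
        return ret;
    }

    int main(void)
    {
        char arg[] = "topology,power";

        printf("mask 0x%llx\n", lookup(arg)); /* 0xf: both groups */
        return 0;
    }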
@@ -872,6 +900,9 @@ void print_header(char *delim)
if (DO_BIC(BIC_CoreTmp))
outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_CORE_THROT_CNT))
+ outp += sprintf(outp, "%sCoreThr", (printed++ ? delim : ""));
+
if (do_rapl && !rapl_joules) {
if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
@@ -961,6 +992,9 @@ void print_header(char *delim)
if (DO_BIC(BIC_RAM__))
outp += sprintf(outp, "%sRAM_%%", (printed++ ? delim : ""));
}
+ if (DO_BIC(BIC_UNCORE_MHZ))
+ outp += sprintf(outp, "%sUncMHz", (printed++ ? delim : ""));
+
for (mp = sys.pp; mp; mp = mp->next) {
if (mp->format == FORMAT_RAW) {
if (mp->width == 64)
@@ -1011,6 +1045,7 @@ int dump_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p
outp += sprintf(outp, "c6: %016llX\n", c->c6);
outp += sprintf(outp, "c7: %016llX\n", c->c7);
outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+ outp += sprintf(outp, "cpu_throt_count: %016llX\n", c->core_throt_cnt);
outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
@@ -1225,6 +1260,10 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (DO_BIC(BIC_CoreTmp))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), c->core_temp_c);
+ /* Core throttle count */
+ if (DO_BIC(BIC_CORE_THROT_CNT))
+ outp += sprintf(outp, "%s%lld", (printed++ ? delim : ""), c->core_throt_cnt);
+
for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW) {
if (mp->width == 32)
@@ -1311,6 +1350,7 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
if (DO_BIC(BIC_PkgWatt))
outp +=
sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
+
if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
outp +=
sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
@@ -1337,6 +1377,9 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data
outp +=
sprintf(outp, fmt8, (printed++ ? delim : ""),
100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+ /* UncMHz */
+ if (DO_BIC(BIC_UNCORE_MHZ))
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), p->uncore_mhz);
for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
if (mp->format == FORMAT_RAW) {
@@ -1386,14 +1429,14 @@ void flush_output_stderr(void)
void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
- static int printed;
+ static int count;
- if (!printed || !summary_only)
+ if ((!count || (header_iterations && !(count % header_iterations))) || !summary_only)
print_header("\t");
format_counters(&average.threads, &average.cores, &average.packages);
- printed = 1;
+ count++;
if (summary_only)
return;
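The rename from "printed" to "count" supports the new --header_iterations
behavior: the header is emitted on the first pass and again whenever the
iteration count reaches a multiple of header_iterations. A tiny sketch of
the cadence:

    /* Sketch: reprint a header every N iterations (N = 3 here). */
    #include <stdio.h>

    int main(void)
    {
        int header_iterations = 3;
        int count;

        for (count = 0; count < 7; count++) {
            if (!count || (header_iterations && !(count % header_iterations)))
                printf("-- header --\n");
            printf("row %d\n", count);
        }
        return 0; /* header appears before rows 0, 3 and 6 */
    }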
@@ -1438,6 +1481,7 @@ int delta_package(struct pkg_data *new, struct pkg_data *old)
else
old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms;
+ old->uncore_mhz = new->uncore_mhz;
old->gfx_mhz = new->gfx_mhz;
old->gfx_act_mhz = new->gfx_act_mhz;
@@ -1467,6 +1511,7 @@ void delta_core(struct core_data *new, struct core_data *old)
old->c6 = new->c6 - old->c6;
old->c7 = new->c7 - old->c7;
old->core_temp_c = new->core_temp_c;
+ old->core_throt_cnt = new->core_throt_cnt;
old->mc6_us = new->mc6_us - old->mc6_us;
DELTA_WRAP32(new->core_energy, old->core_energy);
@@ -1626,6 +1671,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
c->mc6_us = 0;
c->core_temp_c = 0;
c->core_energy = 0;
+ c->core_throt_cnt = 0;
p->pkg_wtd_core_c0 = 0;
p->pkg_any_core_c0 = 0;
@@ -1654,6 +1700,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
p->pkg_temp_c = 0;
p->gfx_rc6_ms = 0;
+ p->uncore_mhz = 0;
p->gfx_mhz = 0;
p->gfx_act_mhz = 0;
for (i = 0, mp = sys.tp; mp; i++, mp = mp->next)
@@ -1710,6 +1757,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
average.cores.mc6_us += c->mc6_us;
average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
+ average.cores.core_throt_cnt = MAX(average.cores.core_throt_cnt, c->core_throt_cnt);
average.cores.core_energy += c->core_energy;
@@ -1752,6 +1800,7 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
average.packages.energy_gfx += p->energy_gfx;
average.packages.gfx_rc6_ms = p->gfx_rc6_ms;
+ average.packages.uncore_mhz = p->uncore_mhz;
average.packages.gfx_mhz = p->gfx_mhz;
average.packages.gfx_act_mhz = p->gfx_act_mhz;
@@ -1912,6 +1961,16 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
return 0;
}
+unsigned long long get_uncore_mhz(int package, int die)
+{
+ char path[128];
+
+ sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/current_freq_khz", package,
+ die);
+
+ return (snapshot_sysfs_counter(path) / 1000);
+}
+
int get_epb(int cpu)
{
char path[128 + PATH_BYTES];
@@ -1987,6 +2046,26 @@ void get_apic_id(struct thread_data *t)
fprintf(outf, "cpu%d: BIOS BUG: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
}
+int get_core_throt_cnt(int cpu, unsigned long long *cnt)
+{
+ char path[128 + PATH_BYTES];
+ unsigned long long tmp;
+ FILE *fp;
+ int ret;
+
+ sprintf(path, "/sys/devices/system/cpu/cpu%d/thermal_throttle/core_throttle_count", cpu);
+ fp = fopen(path, "r");
+ if (!fp)
+ return -1;
+ ret = fscanf(fp, "%lld", &tmp);
+ fclose(fp);
+ if (ret != 1)
+ return -1;
+ *cnt = tmp;
+
+ return 0;
+}
+
/*
* get_counters(...)
* migrate to cpu
@@ -2129,6 +2208,9 @@ retry:
c->core_temp_c = tj_max - ((msr >> 16) & 0x7F);
}
+ if (DO_BIC(BIC_CORE_THROT_CNT))
+ get_core_throt_cnt(cpu, &c->core_throt_cnt);
+
if (do_rapl & RAPL_AMD_F17H) {
if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
return -14;
@@ -2238,6 +2320,10 @@ retry:
if (DO_BIC(BIC_GFX_rc6))
p->gfx_rc6_ms = gfx_cur_rc6_ms;
+ /* n.b. assume die0 uncore frequency applies to whole package */
+ if (DO_BIC(BIC_UNCORE_MHZ))
+ p->uncore_mhz = get_uncore_mhz(p->package_id, 0);
+
if (DO_BIC(BIC_GFXMHz))
p->gfx_mhz = gfx_cur_mhz;
@@ -2428,24 +2514,30 @@ int has_turbo_ratio_group_limits(int family, int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_ATOM_GOLDMONT_D:
case INTEL_FAM6_ATOM_TREMONT_D:
return 1;
+ default:
+ return 0;
}
- return 0;
}
-static void dump_turbo_ratio_limits(int family, int model)
+static void dump_turbo_ratio_limits(int trl_msr_offset, int family, int model)
{
unsigned long long msr, core_counts;
- unsigned int ratio, group_size;
+ int shift;
- get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
- fprintf(outf, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
+ get_msr(base_cpu, trl_msr_offset, &msr);
+ fprintf(outf, "cpu%d: MSR_%sTURBO_RATIO_LIMIT: 0x%08llx\n",
+ base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY" : "", msr);
if (has_turbo_ratio_group_limits(family, model)) {
get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts);
@@ -2454,53 +2546,16 @@ static void dump_turbo_ratio_limits(int family, int model)
core_counts = 0x0807060504030201;
}
- ratio = (msr >> 56) & 0xFF;
- group_size = (core_counts >> 56) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
-
- ratio = (msr >> 48) & 0xFF;
- group_size = (core_counts >> 48) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
-
- ratio = (msr >> 40) & 0xFF;
- group_size = (core_counts >> 40) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
-
- ratio = (msr >> 32) & 0xFF;
- group_size = (core_counts >> 32) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
+ for (shift = 56; shift >= 0; shift -= 8) {
+ unsigned int ratio, group_size;
- ratio = (msr >> 24) & 0xFF;
- group_size = (core_counts >> 24) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
-
- ratio = (msr >> 16) & 0xFF;
- group_size = (core_counts >> 16) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
-
- ratio = (msr >> 8) & 0xFF;
- group_size = (core_counts >> 8) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
+ ratio = (msr >> shift) & 0xFF;
+ group_size = (core_counts >> shift) & 0xFF;
+ if (ratio)
+ fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
+ ratio, bclk, ratio * bclk, group_size);
+ }
- ratio = (msr >> 0) & 0xFF;
- group_size = (core_counts >> 0) & 0xFF;
- if (ratio)
- fprintf(outf, "%d * %.1f = %.1f MHz max turbo %d active cores\n",
- ratio, bclk, ratio * bclk, group_size);
return;
}
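The loop above collapses eight near-identical extractions into one walk
over the byte lanes of the MSR, pairing each ratio byte with the matching
group-size byte. An equivalent standalone sketch with made-up register
values:

    /* Sketch: walk the byte lanes of a 64-bit MSR, high lane first. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long msr = 0x0000000000283038ULL;    /* made-up ratios */
        unsigned long long counts = 0x0807060504030201ULL; /* group sizes */
        int shift;

        for (shift = 56; shift >= 0; shift -= 8) {
            unsigned int ratio = (msr >> shift) & 0xFF;
            unsigned int group_size = (counts >> shift) & 0xFF;

            if (ratio)
                printf("ratio %u for %u active cores\n", ratio, group_size);
        }
        return 0;
    }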
@@ -2913,7 +2968,7 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
}
}
}
- } while (!strncmp(&character, ",", 1));
+ } while (character == ',');
fclose(filep);
return CPU_COUNT_S(size, thiscpu->put_ids);
@@ -3027,6 +3082,8 @@ void set_max_cpu_num(void)
*/
int count_cpus(int cpu)
{
+ UNUSED(cpu);
+
topo.num_cpus++;
return 0;
}
@@ -3361,6 +3418,9 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
int i, ret;
int cpu = t->cpu_id;
+ UNUSED(c);
+ UNUSED(p);
+
for (i = IDX_PKG_ENERGY; i < IDX_COUNT; i++) {
unsigned long long msr_cur, msr_last;
off_t offset;
@@ -3387,6 +3447,8 @@ static int update_msr_sum(struct thread_data *t, struct core_data *c, struct pkg
static void msr_record_handler(union sigval v)
{
+ UNUSED(v);
+
for_all_cpus(update_msr_sum, EVEN_COUNTERS);
}
@@ -3439,6 +3501,9 @@ release_msr:
/*
* set_my_sched_priority(pri)
* return previous
+ *
+ * if non-root, do this:
+ * # /sbin/setcap cap_sys_rawio,cap_sys_nice=+ep /usr/bin/turbostat
*/
int set_my_sched_priority(int priority)
{
@@ -3457,7 +3522,7 @@ int set_my_sched_priority(int priority)
errno = 0;
retval = getpriority(PRIO_PROCESS, 0);
if (retval != priority)
- err(-1, "getpriority(%d) != setpriority(%d)", retval, priority);
+ err(retval, "getpriority(%d) != setpriority(%d)", retval, priority);
return original_priority;
}
@@ -3466,7 +3531,7 @@ void turbostat_loop()
{
int retval;
int restarted = 0;
- int done_iters = 0;
+ unsigned int done_iters = 0;
setup_signal_handler();
@@ -3669,6 +3734,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
has_misc_feature_control = 1;
break;
case INTEL_FAM6_SKYLAKE_X: /* SKX */
+ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */
pkg_cstate_limits = skx_pkg_cstate_limits;
has_misc_feature_control = 1;
break;
@@ -3678,6 +3744,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
break;
case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
no_MSR_MISC_PWR_MGMT = 1;
+ /* FALLTHRU */
case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */
pkg_cstate_limits = slv_pkg_cstate_limits;
break;
@@ -3721,6 +3788,9 @@ int has_slv_msrs(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_ATOM_SILVERMONT:
case INTEL_FAM6_ATOM_SILVERMONT_MID:
@@ -3736,6 +3806,9 @@ int is_dnv(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_ATOM_GOLDMONT_D:
return 1;
@@ -3749,6 +3822,9 @@ int is_bdx(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_BROADWELL_X:
return 1;
@@ -3762,6 +3838,9 @@ int is_skx(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_SKYLAKE_X:
return 1;
@@ -3775,6 +3854,9 @@ int is_icx(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_ICELAKE_X:
return 1;
@@ -3782,11 +3864,30 @@ int is_icx(unsigned int family, unsigned int model)
return 0;
}
+int is_spr(unsigned int family, unsigned int model)
+{
+
+ if (!genuine_intel)
+ return 0;
+
+ if (family != 6)
+ return 0;
+
+ switch (model) {
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ return 1;
+ }
+ return 0;
+}
+
int is_ehl(unsigned int family, unsigned int model)
{
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_ATOM_TREMONT:
return 1;
@@ -3799,6 +3900,9 @@ int is_jvl(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_ATOM_TREMONT_D:
return 1;
@@ -3811,6 +3915,9 @@ int has_turbo_ratio_limit(unsigned int family, unsigned int model)
if (has_slv_msrs(family, model))
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
/* Nehalem compatible, but do not include turbo-ratio limit support */
case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
@@ -3890,6 +3997,7 @@ int has_glm_turbo_ratio_limit(unsigned int family, unsigned int model)
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
return 1;
default:
return 0;
@@ -3917,7 +4025,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
case INTEL_FAM6_CANNONLAKE_L: /* CNL */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_ICELAKE_X: /* ICX */
-
+ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */
case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
return 1;
default:
@@ -3985,8 +4093,12 @@ static void dump_cstate_pstate_config_info(unsigned int family, unsigned int mod
if (has_ivt_turbo_ratio_limit(family, model))
dump_ivt_turbo_ratio_limits();
- if (has_turbo_ratio_limit(family, model))
- dump_turbo_ratio_limits(family, model);
+ if (has_turbo_ratio_limit(family, model)) {
+ dump_turbo_ratio_limits(MSR_TURBO_RATIO_LIMIT, family, model);
+
+ if (is_hybrid)
+ dump_turbo_ratio_limits(MSR_SECONDARY_TURBO_RATIO_LIMIT, family, model);
+ }
if (has_atom_turbo_ratio_limit(family, model))
dump_atom_turbo_ratio_limits();
@@ -4000,6 +4112,24 @@ static void dump_cstate_pstate_config_info(unsigned int family, unsigned int mod
dump_nhm_cst_cfg();
}
+static int read_sysfs_int(char *path)
+{
+ FILE *input;
+ int retval = -1;
+
+ input = fopen(path, "r");
+ if (input == NULL) {
+ if (debug)
+ fprintf(outf, "NSFOD %s\n", path);
+ return (-1);
+ }
+ if (fscanf(input, "%d", &retval) != 1)
+ err(1, "%s: failed to read int from file", path);
+ fclose(input);
+
+ return (retval);
+}
+
static void dump_sysfs_file(char *path)
{
FILE *input;
@@ -4018,6 +4148,48 @@ static void dump_sysfs_file(char *path)
fprintf(outf, "%s: %s", strrchr(path, '/') + 1, cpuidle_buf);
}
+static void intel_uncore_frequency_probe(void)
+{
+ int i, j;
+ char path[128];
+
+ if (!genuine_intel)
+ return;
+
+ if (access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00", R_OK))
+ return;
+
+ if (!access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz", R_OK))
+ BIC_PRESENT(BIC_UNCORE_MHZ);
+
+ if (quiet)
+ return;
+
+ for (i = 0; i < topo.num_packages; ++i) {
+ for (j = 0; j < topo.num_die; ++j) {
+ int k, l;
+
+ sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/min_freq_khz",
+ i, j);
+ k = read_sysfs_int(path);
+ sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/max_freq_khz",
+ i, j);
+ l = read_sysfs_int(path);
+ fprintf(outf, "Uncore Frequency pkg%d die%d: %d - %d MHz ", i, j, k / 1000, l / 1000);
+
+ sprintf(path,
+ "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_min_freq_khz",
+ i, j);
+ k = read_sysfs_int(path);
+ sprintf(path,
+ "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_max_freq_khz",
+ i, j);
+ l = read_sysfs_int(path);
+ fprintf(outf, "(%d - %d MHz)\n", k / 1000, l / 1000);
+ }
+ }
+}
+
static void dump_sysfs_cstate_config(void)
{
char path[64];
@@ -4125,6 +4297,9 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
char *epb_string;
int cpu, epb;
+ UNUSED(c);
+ UNUSED(p);
+
if (!has_epb)
return 0;
@@ -4171,6 +4346,9 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
unsigned long long msr;
int cpu;
+ UNUSED(c);
+ UNUSED(p);
+
if (!has_hwp)
return 0;
@@ -4254,6 +4432,9 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
unsigned long long msr;
int cpu;
+ UNUSED(c);
+ UNUSED(p);
+
cpu = t->cpu_id;
/* per-package */
@@ -4359,6 +4540,8 @@ double get_tdp_intel(unsigned int model)
double get_tdp_amd(unsigned int family)
{
+ UNUSED(family);
+
/* This is the max stock TDP of HEDT/Server Fam17h+ chips */
return 280.0;
}
@@ -4376,6 +4559,8 @@ static double rapl_dram_energy_units_probe(int model, double rapl_energy_units)
case INTEL_FAM6_BROADWELL_X: /* BDX */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
+ case INTEL_FAM6_ICELAKE_X: /* ICX */
+ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */
return (rapl_dram_energy_units = 15.3 / 1000000);
default:
return (rapl_energy_units);
@@ -4465,6 +4650,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
case INTEL_FAM6_BROADWELL_X: /* BDX */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_ICELAKE_X: /* ICX */
+ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */
case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
do_rapl =
RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS |
@@ -4559,6 +4745,8 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
unsigned int has_rapl = 0;
double tdp;
+ UNUSED(model);
+
if (max_extended_level >= 0x80000007) {
__cpuid(0x80000007, eax, ebx, ecx, edx);
/* RAPL (Fam 17h+) */
@@ -4617,6 +4805,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
case INTEL_FAM6_HASWELL_L: /* HSW */
case INTEL_FAM6_HASWELL_G: /* HSW */
do_gfx_perf_limit_reasons = 1;
+ /* FALLTHRU */
case INTEL_FAM6_HASWELL_X: /* HSX */
do_core_perf_limit_reasons = 1;
do_ring_perf_limit_reasons = 1;
@@ -4627,13 +4816,19 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
void automatic_cstate_conversion_probe(unsigned int family, unsigned int model)
{
- if (is_skx(family, model) || is_bdx(family, model) || is_icx(family, model))
+ if (family != 6)
+ return;
+
+ switch (model) {
+ case INTEL_FAM6_BROADWELL_X:
+ case INTEL_FAM6_SKYLAKE_X:
has_automatic_cstate_conversion = 1;
+ }
}
void prewake_cstate_probe(unsigned int family, unsigned int model)
{
- if (is_icx(family, model))
+ if (is_icx(family, model) || is_spr(family, model))
dis_cstate_prewake = 1;
}
@@ -4643,6 +4838,9 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
unsigned int dts, dts2;
int cpu;
+ UNUSED(c);
+ UNUSED(p);
+
if (!(do_dts || do_ptm))
return 0;
@@ -4698,7 +4896,7 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
{
- fprintf(outf, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
+ fprintf(outf, "cpu%d: %s: %sabled (%0.3f Watts, %f sec, clamp %sabled)\n",
cpu, label,
((msr >> 15) & 1) ? "EN" : "DIS",
((msr >> 0) & 0x7FFF) * rapl_power_units,
@@ -4714,6 +4912,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
const char *msr_name;
int cpu;
+ UNUSED(c);
+ UNUSED(p);
+
if (!do_rapl)
return 0;
@@ -4762,12 +4963,19 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
cpu, msr, (msr >> 63) & 1 ? "" : "UN");
print_power_limit_msr(cpu, msr, "PKG Limit #1");
- fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
+ fprintf(outf, "cpu%d: PKG Limit #2: %sabled (%0.3f Watts, %f* sec, clamp %sabled)\n",
cpu,
((msr >> 47) & 1) ? "EN" : "DIS",
((msr >> 32) & 0x7FFF) * rapl_power_units,
(1.0 + (((msr >> 54) & 0x3) / 4.0)) * (1 << ((msr >> 49) & 0x1F)) * rapl_time_units,
((msr >> 48) & 1) ? "EN" : "DIS");
+
+ if (get_msr(cpu, MSR_VR_CURRENT_CONFIG, &msr))
+ return -9;
+
+ fprintf(outf, "cpu%d: MSR_VR_CURRENT_CONFIG: 0x%08llx\n", cpu, msr);
+ fprintf(outf, "cpu%d: PKG Limit #4: %f Watts (%slocked)\n",
+ cpu, ((msr >> 0) & 0x1FFF) * rapl_power_units, (msr >> 31) & 1 ? "" : "UN");
}
if (do_rapl & RAPL_DRAM_POWER_INFO) {
@@ -4830,6 +5038,9 @@ int has_snb_msrs(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_SANDYBRIDGE:
case INTEL_FAM6_SANDYBRIDGE_X:
@@ -4846,6 +5057,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
case INTEL_FAM6_CANNONLAKE_L: /* CNL */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_ICELAKE_X: /* ICX */
+ case INTEL_FAM6_SAPPHIRERAPIDS_X: /* SPR */
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
case INTEL_FAM6_ATOM_GOLDMONT_D: /* DNV */
@@ -4873,6 +5085,9 @@ int has_c8910_msrs(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_HASWELL_L: /* HSW */
case INTEL_FAM6_BROADWELL: /* BDW */
@@ -4899,6 +5114,9 @@ int has_skl_msrs(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_SKYLAKE_L: /* SKL */
case INTEL_FAM6_CANNONLAKE_L: /* CNL */
@@ -4911,6 +5129,10 @@ int is_slm(unsigned int family, unsigned int model)
{
if (!genuine_intel)
return 0;
+
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
case INTEL_FAM6_ATOM_SILVERMONT_D: /* AVN */
@@ -4923,6 +5145,10 @@ int is_knl(unsigned int family, unsigned int model)
{
if (!genuine_intel)
return 0;
+
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
return 1;
@@ -4935,6 +5161,9 @@ int is_cnl(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
+ if (family != 6)
+ return 0;
+
switch (model) {
case INTEL_FAM6_CANNONLAKE_L: /* CNL */
return 1;
@@ -4989,6 +5218,9 @@ int get_cpu_type(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
unsigned int eax, ebx, ecx, edx;
+ UNUSED(c);
+ UNUSED(p);
+
if (!genuine_intel)
return 0;
@@ -5025,6 +5257,9 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
unsigned int tcc_default, tcc_offset;
int cpu;
+ UNUSED(c);
+ UNUSED(p);
+
/* tj_max is used only for dts or ptm */
if (!(do_dts || do_ptm))
return 0;
@@ -5209,13 +5444,15 @@ unsigned int intel_model_duplicates(unsigned int model)
case INTEL_FAM6_LAKEFIELD:
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
+ case INTEL_FAM6_ALDERLAKE_N:
+ case INTEL_FAM6_RAPTORLAKE:
+ case INTEL_FAM6_RAPTORLAKE_P:
return INTEL_FAM6_CANNONLAKE_L;
case INTEL_FAM6_ATOM_TREMONT_L:
return INTEL_FAM6_ATOM_TREMONT;
case INTEL_FAM6_ICELAKE_D:
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
return INTEL_FAM6_ICELAKE_X;
}
return model;
@@ -5246,7 +5483,7 @@ void print_dev_latency(void)
}
/*
- * Linux-perf manages the the HW instructions-retired counter
+ * Linux-perf manages the HW instructions-retired counter
* by enabling when requested, and hiding rollover
*/
void linux_perf_init(void)
@@ -5391,7 +5628,10 @@ void process_cpuid()
__cpuid_count(0x7, 0, eax, ebx, ecx, edx);
has_sgx = ebx & (1 << 2);
- fprintf(outf, "CPUID(7): %sSGX\n", has_sgx ? "" : "No-");
+
+ is_hybrid = edx & (1 << 15);
+
+ fprintf(outf, "CPUID(7): %sSGX %sHybrid\n", has_sgx ? "" : "No-", is_hybrid ? "" : "No-");
if (has_sgx)
decode_feature_control_msr();
@@ -5502,7 +5742,7 @@ void process_cpuid()
BIC_NOT_PRESENT(BIC_Pkgpc7);
use_c1_residency_msr = 1;
}
- if (is_skx(family, model) || is_icx(family, model)) {
+ if (is_skx(family, model) || is_icx(family, model) || is_spr(family, model)) {
BIC_NOT_PRESENT(BIC_CPU_c3);
BIC_NOT_PRESENT(BIC_Pkgpc3);
BIC_NOT_PRESENT(BIC_CPU_c7);
@@ -5547,6 +5787,7 @@ void process_cpuid()
if (!quiet)
dump_cstate_pstate_config_info(family, model);
+ intel_uncore_frequency_probe();
if (!quiet)
print_dev_latency();
@@ -5572,6 +5813,11 @@ void process_cpuid()
else
BIC_NOT_PRESENT(BIC_CPU_LPI);
+ if (!access("/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count", R_OK))
+ BIC_PRESENT(BIC_CORE_THROT_CNT);
+ else
+ BIC_NOT_PRESENT(BIC_CORE_THROT_CNT);
+
if (!access(sys_lpi_file_sysfs, R_OK)) {
sys_lpi_file = sys_lpi_file_sysfs;
BIC_PRESENT(BIC_SYS_LPI);
@@ -5601,11 +5847,6 @@ int dir_filter(const struct dirent *dirp)
return 0;
}
-int open_dev_cpu_msr(int dummy1)
-{
- return 0;
-}
-
void topology_probe()
{
int i;
@@ -5896,6 +6137,9 @@ void turbostat_init()
if (!quiet && do_irtl_snb)
print_irtl();
+
+ if (DO_BIC(BIC_IPC))
+ (void)get_instr_count_fd(base_cpu);
}
int fork_it(char **argv)
@@ -5973,7 +6217,30 @@ int get_and_dump_counters(void)
void print_version()
{
- fprintf(outf, "turbostat version 21.05.04" " - Len Brown <lenb@kernel.org>\n");
+ fprintf(outf, "turbostat version 2022.07.28 - Len Brown <lenb@kernel.org>\n");
+}
+
+#define COMMAND_LINE_SIZE 2048
+
+void print_bootcmd(void)
+{
+ char bootcmd[COMMAND_LINE_SIZE];
+ FILE *fp;
+ int ret;
+
+ memset(bootcmd, 0, COMMAND_LINE_SIZE);
+ fp = fopen("/proc/cmdline", "r");
+ if (!fp)
+ return;
+
+ ret = fread(bootcmd, sizeof(char), COMMAND_LINE_SIZE - 1, fp);
+ if (ret) {
+ bootcmd[ret] = '\0';
+ /* the last character is already '\n' */
+ fprintf(outf, "Kernel command line: %s", bootcmd);
+ }
+
+ fclose(fp);
}
int add_counter(unsigned int msr_num, char *path, char *name,
@@ -6138,6 +6405,16 @@ next:
}
}
+int is_deferred_add(char *name)
+{
+ int i;
+
+ for (i = 0; i < deferred_add_index; ++i)
+ if (!strcmp(name, deferred_add_names[i]))
+ return 1;
+ return 0;
+}
+
int is_deferred_skip(char *name)
{
int i;
@@ -6156,9 +6433,6 @@ void probe_sysfs(void)
int state;
char *sp;
- if (!DO_BIC(BIC_sysfs))
- return;
-
for (state = 10; state >= 0; --state) {
sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name", base_cpu, state);
@@ -6181,6 +6455,9 @@ void probe_sysfs(void)
sprintf(path, "cpuidle/state%d/time", state);
+ if (!DO_BIC(BIC_sysfs) && !is_deferred_add(name_buf))
+ continue;
+
if (is_deferred_skip(name_buf))
continue;
@@ -6206,6 +6483,9 @@ void probe_sysfs(void)
sprintf(path, "cpuidle/state%d/usage", state);
+ if (!DO_BIC(BIC_sysfs) && !is_deferred_add(name_buf))
+ continue;
+
if (is_deferred_skip(name_buf))
continue;
@@ -6313,6 +6593,7 @@ void cmdline(int argc, char **argv)
{ "interval", required_argument, 0, 'i' },
{ "IPC", no_argument, 0, 'I' },
{ "num_iterations", required_argument, 0, 'n' },
+ { "header_iterations", required_argument, 0, 'N' },
{ "help", no_argument, 0, 'h' },
{ "hide", required_argument, 0, 'H' }, // meh, -h taken by --help
{ "Joules", no_argument, 0, 'J' },
@@ -6394,6 +6675,14 @@ void cmdline(int argc, char **argv)
exit(2);
}
break;
+ case 'N':
+ header_iterations = strtod(optarg, NULL);
+
+ if (header_iterations <= 0) {
+ fprintf(outf, "iterations %d should be positive number\n", header_iterations);
+ exit(2);
+ }
+ break;
case 's':
/*
* --show: show only those specified
@@ -6425,13 +6714,17 @@ int main(int argc, char **argv)
outf = stderr;
cmdline(argc, argv);
- if (!quiet)
+ if (!quiet) {
print_version();
+ print_bootcmd();
+ }
probe_sysfs();
turbostat_init();
+ msr_sum_record();
+
/* dump counters and exit */
if (dump_only)
return get_and_dump_counters();
@@ -6443,7 +6736,6 @@ int main(int argc, char **argv)
return 0;
}
- msr_sum_record();
/*
* if any params left, it must be a command to fork
*/
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
index 83844f8b862a..b0ca44c70e83 100644
--- a/tools/spi/spidev_test.c
+++ b/tools/spi/spidev_test.c
@@ -417,6 +417,7 @@ int main(int argc, char *argv[])
{
int ret = 0;
int fd;
+ uint32_t request;
parse_opts(argc, argv);
@@ -430,13 +431,23 @@ int main(int argc, char *argv[])
/*
* spi mode
*/
+ /* WR makes a request to assign 'mode' */
+ request = mode;
ret = ioctl(fd, SPI_IOC_WR_MODE32, &mode);
if (ret == -1)
pabort("can't set spi mode");
+ /* RD reads back the mode the device is actually in */
ret = ioctl(fd, SPI_IOC_RD_MODE32, &mode);
if (ret == -1)
pabort("can't get spi mode");
+ /* Drivers can reject some mode bits without returning an error.
+ * Read the current value to identify what mode it is in, and if it
+ * differs from the requested mode, warn the user.
+ */
+ if (request != mode)
+ printf("WARNING device does not support requested mode 0x%x\n",
+ request);
/*
* bits per word
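For reference, the request/verify pattern added above generalizes to any
spidev client: ask for a mode, read back what actually took effect, and
warn on a mismatch. A minimal userspace sketch, assuming an example device
node path (SPI_IOC_WR_MODE32 and SPI_IOC_RD_MODE32 are the standard spidev
ioctls):

    /* Sketch: set an SPI mode, then verify what the driver accepted. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    int main(void)
    {
        uint32_t request = SPI_MODE_3, mode = SPI_MODE_3;
        int fd = open("/dev/spidev0.0", O_RDWR); /* example path */

        if (fd < 0)
            return 1;
        if (ioctl(fd, SPI_IOC_WR_MODE32, &mode) == -1)
            return 1;
        if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) == -1)
            return 1;
        if (mode != request)
            printf("mode 0x%x not fully supported, device reports 0x%x\n",
                   request, mode);
        close(fd);
        return 0;
    }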
diff --git a/tools/testing/crypto/chacha20-s390/Makefile b/tools/testing/crypto/chacha20-s390/Makefile
new file mode 100644
index 000000000000..db81cd2fb9c5
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2022 Red Hat, Inc.
+# Author: Vladis Dronov <vdronoff@gmail.com>
+
+obj-m += test_cipher.o
+test_cipher-y := test-cipher.o
+
+all:
+ make -C /lib/modules/$(shell uname -r)/build/ M=$(PWD) modules
+clean:
+ make -C /lib/modules/$(shell uname -r)/build/ M=$(PWD) clean
diff --git a/tools/testing/crypto/chacha20-s390/run-tests.sh b/tools/testing/crypto/chacha20-s390/run-tests.sh
new file mode 100644
index 000000000000..43108794b996
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/run-tests.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (C) 2022 Red Hat, Inc.
+# Author: Vladis Dronov <vdronoff@gmail.com>
+#
+# This script runs (via insmod) the test-cipher.ko module, which invokes the
+# generic and s390-native ChaCha20 encryption algorithms with different
+# sizes of data. Check 'dmesg' for results.
+#
+# The insmod error is expected:
+# insmod: ERROR: could not insert module test_cipher.ko: Operation not permitted
+
+lsmod | grep chacha | cut -f1 -d' ' | xargs rmmod
+modprobe chacha_generic
+modprobe chacha_s390
+
+# run encryption for different data sizes, including whole block(s) +/- 1
+insmod test_cipher.ko size=63
+insmod test_cipher.ko size=64
+insmod test_cipher.ko size=65
+insmod test_cipher.ko size=127
+insmod test_cipher.ko size=128
+insmod test_cipher.ko size=129
+insmod test_cipher.ko size=511
+insmod test_cipher.ko size=512
+insmod test_cipher.ko size=513
+insmod test_cipher.ko size=4096
+insmod test_cipher.ko size=65611
+insmod test_cipher.ko size=6291456
+insmod test_cipher.ko size=62914560
+
+# print test logs
+dmesg | tail -170
diff --git a/tools/testing/crypto/chacha20-s390/test-cipher.c b/tools/testing/crypto/chacha20-s390/test-cipher.c
new file mode 100644
index 000000000000..34e8b855266f
--- /dev/null
+++ b/tools/testing/crypto/chacha20-s390/test-cipher.c
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2022 Red Hat, Inc.
+ * Author: Vladis Dronov <vdronoff@gmail.com>
+ */
+
+#include <asm/elf.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <crypto/skcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/acompress.h>
+#include <crypto/rng.h>
+#include <crypto/drbg.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/simd.h>
+#include <crypto/chacha.h>
+#include <crypto/aead.h>
+#include <crypto/hash.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/fips.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+static unsigned int data_size __read_mostly = 256;
+static unsigned int debug __read_mostly = 0;
+
+/* tie all skcipher structures together */
+struct skcipher_def {
+ struct scatterlist sginp, sgout;
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ struct crypto_wait wait;
+};
+
+/* Perform cipher operations with the chacha lib */
+static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain)
+{
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 iv[16], key[32];
+ u64 start, end;
+
+ memset(key, 'X', sizeof(key));
+ memset(iv, 'I', sizeof(iv));
+
+ if (debug) {
+ print_hex_dump(KERN_INFO, "key: ", DUMP_PREFIX_OFFSET,
+ 16, 1, key, 32, 1);
+
+ print_hex_dump(KERN_INFO, "iv: ", DUMP_PREFIX_OFFSET,
+ 16, 1, iv, 16, 1);
+ }
+
+ /* Encrypt */
+ chacha_init_arch(chacha_state, (u32 *)key, iv);
+
+ start = ktime_get_ns();
+ chacha_crypt_arch(chacha_state, cipher, plain, data_size, 20);
+ end = ktime_get_ns();
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "encr:", DUMP_PREFIX_OFFSET,
+ 16, 1, cipher,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ pr_info("lib encryption took: %lld nsec", end - start);
+
+ /* Decrypt */
+ chacha_init_arch(chacha_state, (u32 *)key, iv);
+
+ start = ktime_get_ns();
+ chacha_crypt_arch(chacha_state, revert, cipher, data_size, 20);
+ end = ktime_get_ns();
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "decr:", DUMP_PREFIX_OFFSET,
+ 16, 1, revert,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ pr_info("lib decryption took: %lld nsec", end - start);
+
+ return 0;
+}
+
+/* Perform cipher operations with skcipher */
+static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
+ int enc)
+{
+ int rc;
+
+ if (enc) {
+ rc = crypto_wait_req(crypto_skcipher_encrypt(sk->req),
+ &sk->wait);
+ if (rc)
+ pr_info("skcipher encrypt returned with result"
+ "%d\n", rc);
+ }
+ else
+ {
+ rc = crypto_wait_req(crypto_skcipher_decrypt(sk->req),
+ &sk->wait);
+ if (rc)
+ pr_info("skcipher decrypt returned with result"
+ "%d\n", rc);
+ }
+
+ return rc;
+}
+
+/* Initialize and trigger cipher operations */
+static int test_skcipher(char *name, u8 *revert, u8 *cipher, u8 *plain)
+{
+ struct skcipher_def sk;
+ struct crypto_skcipher *skcipher = NULL;
+ struct skcipher_request *req = NULL;
+ u8 iv[16], key[32];
+ u64 start, end;
+ int ret = -EFAULT;
+
+ skcipher = crypto_alloc_skcipher(name, 0, 0);
+ if (IS_ERR(skcipher)) {
+ pr_info("could not allocate skcipher %s handle\n", name);
+ return PTR_ERR(skcipher);
+ }
+
+ req = skcipher_request_alloc(skcipher, GFP_KERNEL);
+ if (!req) {
+ pr_info("could not allocate skcipher request\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done,
+ &sk.wait);
+
+ memset(key, 'X', sizeof(key));
+ memset(iv, 'I', sizeof(iv));
+
+ if (crypto_skcipher_setkey(skcipher, key, 32)) {
+ pr_info("key could not be set\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (debug) {
+ print_hex_dump(KERN_INFO, "key: ", DUMP_PREFIX_OFFSET,
+ 16, 1, key, 32, 1);
+
+ print_hex_dump(KERN_INFO, "iv: ", DUMP_PREFIX_OFFSET,
+ 16, 1, iv, 16, 1);
+ }
+
+ sk.tfm = skcipher;
+ sk.req = req;
+
+ /* Encrypt in one pass */
+ sg_init_one(&sk.sginp, plain, data_size);
+ sg_init_one(&sk.sgout, cipher, data_size);
+ skcipher_request_set_crypt(req, &sk.sginp, &sk.sgout,
+ data_size, iv);
+ crypto_init_wait(&sk.wait);
+
+ /* Encrypt data */
+ start = ktime_get_ns();
+ ret = test_skcipher_encdec(&sk, 1);
+ end = ktime_get_ns();
+
+ if (ret)
+ goto out;
+
+ pr_info("%s tfm encryption successful, took %lld nsec\n", name, end - start);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "encr:", DUMP_PREFIX_OFFSET,
+ 16, 1, cipher,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Prepare for decryption */
+ memset(iv, 'I', sizeof(iv));
+
+ sg_init_one(&sk.sginp, cipher, data_size);
+ sg_init_one(&sk.sgout, revert, data_size);
+ skcipher_request_set_crypt(req, &sk.sginp, &sk.sgout,
+ data_size, iv);
+ crypto_init_wait(&sk.wait);
+
+ /* Decrypt data */
+ start = ktime_get_ns();
+ ret = test_skcipher_encdec(&sk, 0);
+ end = ktime_get_ns();
+
+ if (ret)
+ goto out;
+
+ pr_info("%s tfm decryption successful, took %lld nsec\n", name, end - start);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "decr:", DUMP_PREFIX_OFFSET,
+ 16, 1, revert,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Dump some internal skcipher data */
+ if (debug)
+ pr_info("skcipher %s: cryptlen %d blksize %d stride %d "
+ "ivsize %d alignmask 0x%x\n",
+ name, sk.req->cryptlen,
+ crypto_skcipher_blocksize(sk.tfm),
+ crypto_skcipher_alg(sk.tfm)->walksize,
+ crypto_skcipher_ivsize(sk.tfm),
+ crypto_skcipher_alignmask(sk.tfm));
+
+out:
+ if (skcipher)
+ crypto_free_skcipher(skcipher);
+ if (req)
+ skcipher_request_free(req);
+ return ret;
+}
+
+static int __init chacha_s390_test_init(void)
+{
+ u8 *plain = NULL, *revert = NULL;
+ u8 *cipher_generic = NULL, *cipher_s390 = NULL;
+ int ret = -1;
+
+ pr_info("s390 ChaCha20 test module: size=%d debug=%d\n",
+ data_size, debug);
+
+ /* Allocate and fill buffers */
+ plain = vmalloc(data_size);
+ if (!plain) {
+ pr_info("could not allocate plain buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(plain, 'a', data_size);
+ get_random_bytes(plain, (data_size > 256 ? 256 : data_size));
+
+ cipher_generic = vmalloc(data_size);
+ if (!cipher_generic) {
+ pr_info("could not allocate cipher_generic buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(cipher_generic, 0, data_size);
+
+ cipher_s390 = vmalloc(data_size);
+ if (!cipher_s390) {
+ pr_info("could not allocate cipher_s390 buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(cipher_s390, 0, data_size);
+
+ revert = vmalloc(data_size);
+ if (!revert) {
+ pr_info("could not allocate revert buffer\n");
+ ret = -2;
+ goto out;
+ }
+ memset(revert, 0, data_size);
+
+ if (debug)
+ print_hex_dump(KERN_INFO, "src: ", DUMP_PREFIX_OFFSET,
+ 16, 1, plain,
+ (data_size > 64 ? 64 : data_size), 1);
+
+ /* Use chacha20 generic */
+ ret = test_skcipher("chacha20-generic", revert, cipher_generic, plain);
+ if (ret)
+ goto out;
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("generic en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("generic en/decryption check OK\n");
+
+ memset(revert, 0, data_size);
+
+ /* Use chacha20 s390 */
+ ret = test_skcipher("chacha20-s390", revert, cipher_s390, plain);
+ if (ret)
+ goto out;
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("s390 en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("s390 en/decryption check OK\n");
+
+ if (memcmp(cipher_generic, cipher_s390, data_size)) {
+ pr_info("s390 vs generic check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("s390 vs generic check OK\n");
+
+ memset(cipher_s390, 0, data_size);
+ memset(revert, 0, data_size);
+
+ /* Use chacha20 lib */
+ test_lib_chacha(revert, cipher_s390, plain);
+
+ if (memcmp(plain, revert, data_size)) {
+ pr_info("lib en/decryption check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("lib en/decryption check OK\n");
+
+ if (memcmp(cipher_generic, cipher_s390, data_size)) {
+ pr_info("lib vs generic check FAILED\n");
+ ret = -2;
+ goto out;
+ }
+ else
+ pr_info("lib vs generic check OK\n");
+
+ pr_info("--- chacha20 s390 test end ---\n");
+
+out:
+ if (plain)
+ vfree(plain);
+ if (cipher_generic)
+ vfree(cipher_generic);
+ if (cipher_s390)
+ vfree(cipher_s390);
+ if (revert)
+ vfree(revert);
+
+ return -1;
+}
+
+static void __exit chacha_s390_test_exit(void)
+{
+ pr_info("s390 ChaCha20 test module exit\n");
+}
+
+module_param_named(size, data_size, uint, 0660);
+module_param(debug, uint, 0660);
+MODULE_PARM_DESC(size, "Size of a plaintext");
+MODULE_PARM_DESC(debug, "Debug level (0=off,1=on)");
+
+module_init(chacha_s390_test_init);
+module_exit(chacha_s390_test_exit);
+
+MODULE_DESCRIPTION("s390 ChaCha20 self-test");
+MODULE_AUTHOR("Vladis Dronov <vdronoff@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 82e49ab0937d..33543231d453 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -8,6 +8,8 @@ ldflags-y += --wrap=devm_cxl_port_enumerate_dports
ldflags-y += --wrap=devm_cxl_setup_hdm
ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
ldflags-y += --wrap=devm_cxl_enumerate_decoders
+ldflags-y += --wrap=cxl_await_media_ready
+ldflags-y += --wrap=cxl_hdm_decode_init
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
@@ -34,7 +36,6 @@ cxl_port-y += config_check.o
obj-m += cxl_mem.o
cxl_mem-y := $(CXL_SRC)/mem.o
-cxl_mem-y += mock_mem.o
cxl_mem-y += config_check.o
obj-m += cxl_core.o
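The two new --wrap flags extend the harness's existing interposition
scheme: with GNU ld's --wrap=sym, undefined references to sym resolve to
__wrap_sym, and the wrapper can still reach the genuine implementation as
__real_sym. A toy illustration of the mechanism, separate from the CXL
functions themselves (the real code must live in another object so the
reference stays undefined at link time):

    /* lib.c -- the real implementation, compiled into its own object */
    int compute(int x)
    {
        return x * 2;
    }

    /* main.c -- build: gcc -c lib.c && gcc main.c lib.o -Wl,--wrap=compute */
    #include <stdio.h>

    int compute(int x);        /* calls here resolve to __wrap_compute() */
    int __real_compute(int x); /* linker alias for the original */

    int __wrap_compute(int x)
    {
        printf("intercepted compute(%d)\n", x);
        return __real_compute(x);
    }

    int main(void)
    {
        printf("%d\n", compute(21)); /* intercept fires, then prints 42 */
        return 0;
    }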
diff --git a/tools/testing/cxl/mock_mem.c b/tools/testing/cxl/mock_mem.c
deleted file mode 100644
index d1dec5845139..000000000000
--- a/tools/testing/cxl/mock_mem.c
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
-
-#include <linux/types.h>
-
-struct cxl_dev_state;
-bool cxl_dvsec_decode_init(struct cxl_dev_state *cxlds)
-{
- return true;
-}
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index b6b726eff3e2..6b9239b2afd4 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -237,25 +237,11 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
return rc;
}
-static int cxl_mock_wait_media_ready(struct cxl_dev_state *cxlds)
-{
- msleep(100);
- return 0;
-}
-
static void label_area_release(void *lsa)
{
vfree(lsa);
}
-static void mock_validate_dvsec_ranges(struct cxl_dev_state *cxlds)
-{
- struct cxl_endpoint_dvsec_info *info;
-
- info = &cxlds->info;
- info->mem_enabled = true;
-}
-
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -278,7 +264,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
cxlds->serial = pdev->id;
cxlds->mbox_send = cxl_mock_mbox_send;
- cxlds->wait_media_ready = cxl_mock_wait_media_ready;
cxlds->payload_size = SZ_4K;
rc = cxl_enumerate_cmds(cxlds);
@@ -293,8 +278,6 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
if (rc)
return rc;
- mock_validate_dvsec_ranges(cxlds);
-
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 6e8c9d63c92d..f1f8c40948c5 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -193,6 +193,35 @@ int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_port_enumerate_dports, CXL);
+int __wrap_cxl_await_media_ready(struct cxl_dev_state *cxlds)
+{
+ int rc, index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_dev(cxlds->dev))
+ rc = 0;
+ else
+ rc = cxl_await_media_ready(cxlds);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_await_media_ready, CXL);
+
+bool __wrap_cxl_hdm_decode_init(struct cxl_dev_state *cxlds,
+ struct cxl_hdm *cxlhdm)
+{
+ int rc = 0, index;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (!ops || !ops->is_mock_dev(cxlds->dev))
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm);
+ put_cxl_mock_ops(index);
+
+ return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_hdm_decode_init, CXL);
+
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
MODULE_IMPORT_NS(CXL);
diff --git a/tools/testing/kunit/configs/all_tests_uml.config b/tools/testing/kunit/configs/all_tests_uml.config
new file mode 100644
index 000000000000..bdee36bef4a3
--- /dev/null
+++ b/tools/testing/kunit/configs/all_tests_uml.config
@@ -0,0 +1,37 @@
+# This config enables as many tests as possible under UML.
+# It is intended for use in continuous integration systems and similar, for
+# automated testing of as much of the kernel as possible.
+# The config is manually maintained, though it uses KUNIT_ALL_TESTS=y to enable
+# any tests whose dependencies are already satisfied. Please feel free to add
+# more options if they enable any new tests.
+
+CONFIG_KUNIT=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
+CONFIG_KUNIT_ALL_TESTS=y
+
+CONFIG_IIO=y
+
+CONFIG_EXT4_FS=y
+
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+
+CONFIG_VIRTIO_UML=y
+CONFIG_UML_PCI_OVER_VIRTIO=y
+CONFIG_PCI=y
+CONFIG_USB4=y
+
+CONFIG_NET=y
+CONFIG_MCTP=y
+
+CONFIG_INET=y
+CONFIG_MPTCP=y
+
+CONFIG_DAMON=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DEBUG_FS=y
+CONFIG_DAMON_DBGFS=y
+
+CONFIG_SECURITY=y
+CONFIG_SECURITY_APPARMOR=y
diff --git a/tools/testing/kunit/configs/arch_uml.config b/tools/testing/kunit/configs/arch_uml.config
new file mode 100644
index 000000000000..e824ce43b05a
--- /dev/null
+++ b/tools/testing/kunit/configs/arch_uml.config
@@ -0,0 +1,5 @@
+# Config options which are added to UML builds by default
+
+# Enable virtio/pci, as a lot of tests require it.
+CONFIG_VIRTIO_UML=y
+CONFIG_UML_PCI_OVER_VIRTIO=y
diff --git a/tools/testing/kunit/configs/coverage_uml.config b/tools/testing/kunit/configs/coverage_uml.config
new file mode 100644
index 000000000000..bacb77664fa8
--- /dev/null
+++ b/tools/testing/kunit/configs/coverage_uml.config
@@ -0,0 +1,11 @@
+# This config fragment enables coverage on UML, which is different from the
+# normal gcov used in other arches (no debugfs).
+# Example usage:
+# ./tools/testing/kunit/kunit.py run \
+# --kunitconfig=tools/testing/kunit/configs/all_tests_uml.config \
+# --kunitconfig=tools/testing/kunit/configs/coverage_uml.config
+
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_GCOV=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 9274c6355809..e132b0654029 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -10,6 +10,7 @@
import argparse
import os
import re
+import shlex
import sys
import time
@@ -22,6 +23,7 @@ from typing import Iterable, List, Optional, Sequence, Tuple
import kunit_json
import kunit_kernel
import kunit_parser
+from kunit_printer import stdout
class KunitStatus(Enum):
SUCCESS = auto()
@@ -47,11 +49,11 @@ class KunitBuildRequest(KunitConfigRequest):
@dataclass
class KunitParseRequest:
raw_output: Optional[str]
- build_dir: str
json: Optional[str]
@dataclass
class KunitExecRequest(KunitParseRequest):
+ build_dir: str
timeout: int
alltests: bool
filter_glob: str
@@ -63,8 +65,6 @@ class KunitRequest(KunitExecRequest, KunitBuildRequest):
pass
-KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
-
def get_kernel_root_path() -> str:
path = sys.argv[0] if not __file__ else __file__
parts = os.path.realpath(path).split('tools/testing/kunit')
@@ -74,7 +74,7 @@ def get_kernel_root_path() -> str:
def config_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitConfigRequest) -> KunitResult:
- kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...')
+ stdout.print_with_timestamp('Configuring KUnit Kernel ...')
config_start = time.time()
success = linux.build_reconfig(request.build_dir, request.make_options)
@@ -87,7 +87,7 @@ def config_tests(linux: kunit_kernel.LinuxSourceTree,
def build_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitBuildRequest) -> KunitResult:
- kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
+ stdout.print_with_timestamp('Building KUnit Kernel ...')
build_start = time.time()
success = linux.build_kernel(request.alltests,
@@ -126,7 +126,7 @@ def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest)
lines.pop()
# Filter out any extraneous non-test output that might have gotten mixed in.
- return [l for l in lines if re.match('^[^\s.]+\.[^\s.]+$', l)]
+ return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
def _suites_from_test_list(tests: List[str]) -> List[str]:
"""Extracts all the suites from an ordered list of tests."""
@@ -155,10 +155,12 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -
test_glob = request.filter_glob.split('.', maxsplit=2)[1]
filter_globs = [g + '.'+ test_glob for g in filter_globs]
+ metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig')
+
test_counts = kunit_parser.TestCounts()
exec_time = 0.0
for i, filter_glob in enumerate(filter_globs):
- kunit_parser.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
+ stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
test_start = time.time()
run_result = linux.run_kernel(
@@ -167,7 +169,7 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -
filter_glob=filter_glob,
build_dir=request.build_dir)
- _, test_result = parse_tests(request, run_result)
+ _, test_result = parse_tests(request, metadata, run_result)
# run_kernel() doesn't block on the kernel exiting.
# That only happens after we get the last line of output from `run_result`.
# So exec_time here actually contains parsing + execution time, which is fine.
@@ -188,10 +190,9 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -
def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED):
return KunitStatus.SUCCESS
- else:
- return KunitStatus.TEST_FAILURE
+ return KunitStatus.TEST_FAILURE
-def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
+def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
parse_start = time.time()
test_result = kunit_parser.Test()
@@ -206,8 +207,6 @@ def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> Tuple[
pass
elif request.raw_output == 'kunit':
output = kunit_parser.extract_tap_lines(output)
- else:
- print(f'Unknown --raw_output option "{request.raw_output}"', file=sys.stderr)
for line in output:
print(line.rstrip())
@@ -216,13 +215,16 @@ def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> Tuple[
parse_end = time.time()
if request.json:
- json_obj = kunit_json.get_json_result(
+ json_str = kunit_json.get_json_result(
test=test_result,
- def_config='kunit_defconfig',
- build_dir=request.build_dir,
- json_path=request.json)
+ metadata=metadata)
if request.json == 'stdout':
- print(json_obj)
+ print(json_str)
+ else:
+ with open(request.json, 'w') as f:
+ f.write(json_str)
+ stdout.print_with_timestamp("Test results stored in %s" %
+ os.path.abspath(request.json))
if test_result.status != kunit_parser.TestStatus.SUCCESS:
return KunitResult(KunitStatus.TEST_FAILURE, parse_end - parse_start), test_result
@@ -245,7 +247,7 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
run_end = time.time()
- kunit_parser.print_with_timestamp((
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' +
'building, %.3fs running\n') % (
run_end - run_start,
@@ -281,22 +283,23 @@ def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
'directory.',
- type=str, default='.kunit', metavar='build_dir')
+ type=str, default='.kunit', metavar='DIR')
parser.add_argument('--make_options',
help='X=Y make option, can be repeated.',
- action='append')
+ action='append', metavar='X=Y')
parser.add_argument('--alltests',
help='Run all KUnit tests through allyesconfig',
action='store_true')
parser.add_argument('--kunitconfig',
help='Path to Kconfig fragment that enables KUnit tests.'
' If given a directory, (e.g. lib/kunit), "/.kunitconfig" '
- 'will get automatically appended.',
- metavar='kunitconfig')
+ 'will get automatically appended. If repeated, the files '
+ 'are blindly concatenated, which might not work in all cases.',
+ action='append', metavar='PATHS')
parser.add_argument('--kconfig_add',
help='Additional Kconfig options to append to the '
'.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.',
- action='append')
+ action='append', metavar='CONFIG_X=Y')
parser.add_argument('--arch',
help=('Specifies the architecture to run tests under. '
@@ -304,7 +307,7 @@ def add_common_opts(parser) -> None:
'string passed to the ARCH make param, '
'e.g. i386, x86_64, arm, um, etc. Non-UML '
'architectures run on QEMU.'),
- type=str, default='um', metavar='arch')
+ type=str, default='um', metavar='ARCH')
parser.add_argument('--cross_compile',
help=('Sets make\'s CROSS_COMPILE variable; it should '
@@ -316,18 +319,22 @@ def add_common_opts(parser) -> None:
'if you have downloaded the microblaze toolchain '
'from the 0-day website to a directory in your '
'home directory called `toolchains`).'),
- metavar='cross_compile')
+ metavar='PREFIX')
parser.add_argument('--qemu_config',
 help=('Takes a path to a file containing '
'a QemuArchParams object.'),
- type=str, metavar='qemu_config')
+ type=str, metavar='FILE')
+
+ parser.add_argument('--qemu_args',
+ help='Additional QEMU arguments, e.g. "-smp 8"',
+ action='append', metavar='')
def add_build_opts(parser) -> None:
parser.add_argument('--jobs',
help='As in the make command, "Specifies the number of '
'jobs (commands) to run simultaneously."',
- type=int, default=get_default_jobs(), metavar='jobs')
+ type=int, default=get_default_jobs(), metavar='N')
def add_exec_opts(parser) -> None:
parser.add_argument('--timeout',
@@ -336,7 +343,7 @@ def add_exec_opts(parser) -> None:
'tests.',
type=int,
default=300,
- metavar='timeout')
+ metavar='SECONDS')
parser.add_argument('filter_glob',
help='Filter which KUnit test suites/tests run at '
'boot-time, e.g. list* or list*.*del_test',
@@ -346,26 +353,44 @@ def add_exec_opts(parser) -> None:
metavar='filter_glob')
parser.add_argument('--kernel_args',
 help='Kernel command-line parameters. May be repeated.',
- action='append')
+ action='append', metavar='')
parser.add_argument('--run_isolated', help='If set, boot the kernel for each '
'individual suite/test. This is can be useful for debugging '
'a non-hermetic test, one that might pass/fail based on '
'what ran before it.',
type=str,
- choices=['suite', 'test']),
+ choices=['suite', 'test'])
def add_parse_opts(parser) -> None:
parser.add_argument('--raw_output', help='If set don\'t format output from kernel. '
'If set to --raw_output=kunit, filters to just KUnit output.',
- type=str, nargs='?', const='all', default=None)
+ type=str, nargs='?', const='all', default=None, choices=['all', 'kunit'])
parser.add_argument('--json',
nargs='?',
help='Stores test results in a JSON, and either '
'prints to stdout or saves to file if a '
'filename is specified',
- type=str, const='stdout', default=None)
+ type=str, const='stdout', default=None, metavar='FILE')
-def main(argv, linux=None):
+
+def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree:
+ """Returns a LinuxSourceTree based on the user's arguments."""
+ # Allow users to specify multiple arguments in one string, e.g. '-smp 8'
+ qemu_args: List[str] = []
+ if cli_args.qemu_args:
+ for arg in cli_args.qemu_args:
+ qemu_args.extend(shlex.split(arg))
+
+ return kunit_kernel.LinuxSourceTree(cli_args.build_dir,
+ kunitconfig_paths=cli_args.kunitconfig,
+ kconfig_add=cli_args.kconfig_add,
+ arch=cli_args.arch,
+ cross_compile=cli_args.cross_compile,
+ qemu_config_path=cli_args.qemu_config,
+ extra_qemu_args=qemu_args)
+
+
+def main(argv):
parser = argparse.ArgumentParser(
description='Helps writing and running KUnit tests.')
subparser = parser.add_subparsers(dest='subcommand')
@@ -412,14 +437,7 @@ def main(argv, linux=None):
if not os.path.exists(cli_args.build_dir):
os.mkdir(cli_args.build_dir)
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- kconfig_add=cli_args.kconfig_add,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
+ linux = tree_from_args(cli_args)
request = KunitRequest(build_dir=cli_args.build_dir,
make_options=cli_args.make_options,
jobs=cli_args.jobs,
@@ -438,50 +456,29 @@ def main(argv, linux=None):
not os.path.exists(cli_args.build_dir)):
os.mkdir(cli_args.build_dir)
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- kconfig_add=cli_args.kconfig_add,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
+ linux = tree_from_args(cli_args)
request = KunitConfigRequest(build_dir=cli_args.build_dir,
make_options=cli_args.make_options)
result = config_tests(linux, request)
- kunit_parser.print_with_timestamp((
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (
result.elapsed_time))
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'build':
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- kconfig_add=cli_args.kconfig_add,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
+ linux = tree_from_args(cli_args)
request = KunitBuildRequest(build_dir=cli_args.build_dir,
make_options=cli_args.make_options,
jobs=cli_args.jobs,
alltests=cli_args.alltests)
result = config_and_build_tests(linux, request)
- kunit_parser.print_with_timestamp((
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (
result.elapsed_time))
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'exec':
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- kconfig_add=cli_args.kconfig_add,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
+ linux = tree_from_args(cli_args)
exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
build_dir=cli_args.build_dir,
json=cli_args.json,
@@ -491,21 +488,22 @@ def main(argv, linux=None):
kernel_args=cli_args.kernel_args,
run_isolated=cli_args.run_isolated)
result = exec_tests(linux, exec_request)
- kunit_parser.print_with_timestamp((
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (result.elapsed_time))
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'parse':
- if cli_args.file == None:
+ if cli_args.file is None:
sys.stdin.reconfigure(errors='backslashreplace') # pytype: disable=attribute-error
kunit_output = sys.stdin
else:
with open(cli_args.file, 'r', errors='backslashreplace') as f:
kunit_output = f.read().splitlines()
+ # We know nothing about how the result was created!
+ metadata = kunit_json.Metadata()
request = KunitParseRequest(raw_output=cli_args.raw_output,
- build_dir='',
json=cli_args.json)
- result, _ = parse_tests(request, kunit_output)
+ result, _ = parse_tests(request, metadata, kunit_output)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
else:
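The new tree_from_args() helper above centralizes LinuxSourceTree construction for the run/config/build/exec subcommands. The one subtlety is --qemu_args: each repeated value may carry several tokens (e.g. '-smp 8'), so values are shell-split before being flattened. A minimal standalone sketch of that splitting, using only the standard library (split_qemu_args is a hypothetical name, not part of this patch):

import shlex
from typing import List

def split_qemu_args(raw_values: List[str]) -> List[str]:
    # Each --qemu_args value may hold multiple tokens, e.g. '-smp 8',
    # so shell-split every value and flatten the results.
    args: List[str] = []
    for value in raw_values:
        args.extend(shlex.split(value))
    return args

assert split_qemu_args(['-smp 8', '-m 2048']) == ['-smp', '8', '-m', '2048']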
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
index 677354546156..48b5f34b2e5d 100644
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -6,61 +6,77 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
-import collections
+from dataclasses import dataclass
import re
-from typing import List, Set
+from typing import Dict, Iterable, List, Set, Tuple
CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
-KconfigEntryBase = collections.namedtuple('KconfigEntryBase', ['name', 'value'])
-
-class KconfigEntry(KconfigEntryBase):
+@dataclass(frozen=True)
+class KconfigEntry:
+ name: str
+ value: str
def __str__(self) -> str:
if self.value == 'n':
- return r'# CONFIG_%s is not set' % (self.name)
- else:
- return r'CONFIG_%s=%s' % (self.name, self.value)
+ return f'# CONFIG_{self.name} is not set'
+ return f'CONFIG_{self.name}={self.value}'
class KconfigParseError(Exception):
"""Error parsing Kconfig defconfig or .config."""
-class Kconfig(object):
+class Kconfig:
"""Represents defconfig or .config specified using the Kconfig language."""
def __init__(self) -> None:
- self._entries = [] # type: List[KconfigEntry]
+ self._entries = {} # type: Dict[str, str]
+
+ def __eq__(self, other) -> bool:
+ if not isinstance(other, self.__class__):
+ return False
+ return self._entries == other._entries
- def entries(self) -> Set[KconfigEntry]:
- return set(self._entries)
+ def __repr__(self) -> str:
+ return ','.join(str(e) for e in self.as_entries())
- def add_entry(self, entry: KconfigEntry) -> None:
- self._entries.append(entry)
+ def as_entries(self) -> Iterable[KconfigEntry]:
+ for name, value in self._entries.items():
+ yield KconfigEntry(name, value)
+
+ def add_entry(self, name: str, value: str) -> None:
+ self._entries[name] = value
def is_subset_of(self, other: 'Kconfig') -> bool:
- other_dict = {e.name: e.value for e in other.entries()}
- for a in self.entries():
- b = other_dict.get(a.name)
+ for name, value in self._entries.items():
+ b = other._entries.get(name)
if b is None:
- if a.value == 'n':
+ if value == 'n':
continue
return False
- elif a.value != b:
+ if value != b:
return False
return True
+ def conflicting_options(self, other: 'Kconfig') -> List[Tuple[KconfigEntry, KconfigEntry]]:
+ diff = [] # type: List[Tuple[KconfigEntry, KconfigEntry]]
+ for name, value in self._entries.items():
+ b = other._entries.get(name)
+ if b and value != b:
+ pair = (KconfigEntry(name, value), KconfigEntry(name, b))
+ diff.append(pair)
+ return diff
+
def merge_in_entries(self, other: 'Kconfig') -> None:
- if other.is_subset_of(self):
- return
- self._entries = list(self.entries().union(other.entries()))
+ for name, value in other._entries.items():
+ self._entries[name] = value
def write_to_file(self, path: str) -> None:
with open(path, 'a+') as f:
- for entry in self.entries():
- f.write(str(entry) + '\n')
+ for e in self.as_entries():
+ f.write(str(e) + '\n')
def parse_file(path: str) -> Kconfig:
with open(path, 'r') as f:
@@ -78,18 +94,15 @@ def parse_from_string(blob: str) -> Kconfig:
match = config_matcher.match(line)
if match:
- entry = KconfigEntry(match.group(1), match.group(2))
- kconfig.add_entry(entry)
+ kconfig.add_entry(match.group(1), match.group(2))
continue
empty_match = is_not_set_matcher.match(line)
if empty_match:
- entry = KconfigEntry(empty_match.group(1), 'n')
- kconfig.add_entry(entry)
+ kconfig.add_entry(empty_match.group(1), 'n')
continue
if line[0] == '#':
continue
- else:
- raise KconfigParseError('Failed to parse: ' + line)
+ raise KconfigParseError('Failed to parse: ' + line)
return kconfig
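With Kconfig now backed by a dict keyed on option name, merging and conflict detection reduce to plain dict operations: the last writer wins on merge, and a conflict is the same name mapped to different values. A standalone sketch of those semantics, with plain dicts standing in for Kconfig objects (merge_in and conflicting are illustrative names only):

from typing import Dict, List, Tuple

def merge_in(base: Dict[str, str], other: Dict[str, str]) -> Dict[str, str]:
    # Mirrors merge_in_entries(): entries from `other` win on conflict.
    merged = dict(base)
    merged.update(other)
    return merged

def conflicting(a: Dict[str, str], b: Dict[str, str]) -> List[Tuple[str, str, str]]:
    # Mirrors conflicting_options(): same option name, different values.
    return [(name, value, b[name])
            for name, value in a.items() if name in b and b[name] != value]

assert merge_in({'KUNIT': 'y'}, {'KUNIT': 'm'}) == {'KUNIT': 'm'}
assert conflicting({'KUNIT': 'y'}, {'KUNIT': 'm'}) == [('KUNIT', 'y', 'm')]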
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index 6862671709bc..10ff65689dd8 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -6,60 +6,58 @@
# Copyright (C) 2020, Google LLC.
# Author: Heidi Fahim <heidifahim@google.com>
+from dataclasses import dataclass
import json
-import os
-
-import kunit_parser
+from typing import Any, Dict
from kunit_parser import Test, TestStatus
-from typing import Any, Dict, Optional
+
+@dataclass
+class Metadata:
+ """Stores metadata about this run to include in get_json_result()."""
+ arch: str = ''
+ def_config: str = ''
+ build_dir: str = ''
JsonObj = Dict[str, Any]
-def _get_group_json(test: Test, def_config: str,
- build_dir: Optional[str]) -> JsonObj:
+_status_map: Dict[TestStatus, str] = {
+ TestStatus.SUCCESS: "PASS",
+ TestStatus.SKIPPED: "SKIP",
+ TestStatus.TEST_CRASHED: "ERROR",
+}
+
+def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj:
sub_groups = [] # List[JsonObj]
test_cases = [] # List[JsonObj]
for subtest in test.subtests:
- if len(subtest.subtests):
- sub_group = _get_group_json(subtest, def_config,
- build_dir)
+ if subtest.subtests:
+ sub_group = _get_group_json(subtest, common_fields)
sub_groups.append(sub_group)
- else:
- test_case = {"name": subtest.name, "status": "FAIL"}
- if subtest.status == TestStatus.SUCCESS:
- test_case["status"] = "PASS"
- elif subtest.status == TestStatus.SKIPPED:
- test_case["status"] = "SKIP"
- elif subtest.status == TestStatus.TEST_CRASHED:
- test_case["status"] = "ERROR"
- test_cases.append(test_case)
+ continue
+ status = _status_map.get(subtest.status, "FAIL")
+ test_cases.append({"name": subtest.name, "status": status})
test_group = {
"name": test.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
"sub_groups": sub_groups,
"test_cases": test_cases,
+ }
+ test_group.update(common_fields)
+ return test_group
+
+def get_json_result(test: Test, metadata: Metadata) -> str:
+ common_fields = {
+ "arch": metadata.arch,
+ "defconfig": metadata.def_config,
+ "build_environment": metadata.build_dir,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
- return test_group
-def get_json_result(test: Test, def_config: str,
- build_dir: Optional[str], json_path: str) -> str:
- test_group = _get_group_json(test, def_config, build_dir)
+ test_group = _get_group_json(test, common_fields)
test_group["name"] = "KUnit Test Group"
- json_obj = json.dumps(test_group, indent=4)
- if json_path != 'stdout':
- with open(json_path, 'w') as result_path:
- result_path.write(json_obj)
- root = __file__.split('tools/testing/kunit/')[0]
- kunit_parser.print_with_timestamp(
- "Test results stored in %s" %
- os.path.join(root, result_path.name))
- return json_obj
+ return json.dumps(test_group, indent=4)
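The per-status if/elif chain becomes a table lookup with "FAIL" as the fallback, so any status without an explicit mapping (notably FAILURE) reports as "FAIL". A self-contained sketch of the lookup, with TestStatus stubbed out for illustration:

import enum
from typing import Dict

class TestStatus(enum.Enum):
    SUCCESS = enum.auto()
    SKIPPED = enum.auto()
    TEST_CRASHED = enum.auto()
    FAILURE = enum.auto()

_status_map: Dict[TestStatus, str] = {
    TestStatus.SUCCESS: 'PASS',
    TestStatus.SKIPPED: 'SKIP',
    TestStatus.TEST_CRASHED: 'ERROR',
}

assert _status_map.get(TestStatus.FAILURE, 'FAIL') == 'FAIL'
assert _status_map.get(TestStatus.TEST_CRASHED, 'FAIL') == 'ERROR'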
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 3c4196cef3ed..f5c26ea89714 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -11,13 +11,14 @@ import importlib.util
import logging
import subprocess
import os
+import shlex
import shutil
import signal
import threading
from typing import Iterator, List, Optional, Tuple
import kunit_config
-import kunit_parser
+from kunit_printer import stdout
import qemu_config
KCONFIG_PATH = '.config'
@@ -25,15 +26,11 @@ KUNITCONFIG_PATH = '.kunitconfig'
OLD_KUNITCONFIG_PATH = 'last_used_kunitconfig'
DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config'
BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config'
+UML_KCONFIG_PATH = 'tools/testing/kunit/configs/arch_uml.config'
OUTFILE_PATH = 'test.log'
ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs')
-def get_file_path(build_dir, default):
- if build_dir:
- default = os.path.join(build_dir, default)
- return default
-
class ConfigError(Exception):
"""Represents an error trying to configure the Linux kernel."""
@@ -42,7 +39,7 @@ class BuildError(Exception):
"""Represents an error trying to build the Linux kernel."""
-class LinuxSourceTreeOperations(object):
+class LinuxSourceTreeOperations:
"""An abstraction over command line operations performed on a source tree."""
def __init__(self, linux_arch: str, cross_compile: Optional[str]):
@@ -57,20 +54,18 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_arch_qemuconfig(self, kconfig: kunit_config.Kconfig) -> None:
- pass
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ return base_kunitconfig
- def make_allyesconfig(self, build_dir, make_options) -> None:
+ def make_allyesconfig(self, build_dir: str, make_options) -> None:
raise ConfigError('Only the "um" arch is supported for alltests')
- def make_olddefconfig(self, build_dir, make_options) -> None:
- command = ['make', 'ARCH=' + self._linux_arch, 'olddefconfig']
+ def make_olddefconfig(self, build_dir: str, make_options) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, 'olddefconfig']
if self._cross_compile:
command += ['CROSS_COMPILE=' + self._cross_compile]
if make_options:
command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
print('Populating config with:\n$', ' '.join(command))
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
@@ -79,14 +74,12 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make(self, jobs, build_dir, make_options) -> None:
- command = ['make', 'ARCH=' + self._linux_arch, '--jobs=' + str(jobs)]
+ def make(self, jobs, build_dir: str, make_options) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)]
if make_options:
command.extend(make_options)
if self._cross_compile:
command += ['CROSS_COMPILE=' + self._cross_compile]
- if build_dir:
- command += ['O=' + build_dir]
print('Building with:\n$', ' '.join(command))
try:
proc = subprocess.Popen(command,
@@ -117,9 +110,10 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
self._kernel_command_line = qemu_arch_params.kernel_command_line + ' kunit_shutdown=reboot'
self._extra_qemu_params = qemu_arch_params.extra_qemu_params
- def make_arch_qemuconfig(self, base_kunitconfig: kunit_config.Kconfig) -> None:
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
kconfig = kunit_config.parse_from_string(self._kconfig)
- base_kunitconfig.merge_in_entries(kconfig)
+ kconfig.merge_in_entries(base_kunitconfig)
+ return kconfig
def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
kernel_path = os.path.join(build_dir, self._kernel_path)
@@ -127,16 +121,17 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
'-nodefaults',
'-m', '1024',
'-kernel', kernel_path,
- '-append', '\'' + ' '.join(params + [self._kernel_command_line]) + '\'',
+ '-append', ' '.join(params + [self._kernel_command_line]),
'-no-reboot',
'-nographic',
- '-serial stdio'] + self._extra_qemu_params
- print('Running tests with:\n$', ' '.join(qemu_command))
- return subprocess.Popen(' '.join(qemu_command),
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True, shell=True, errors='backslashreplace')
+ '-serial', 'stdio'] + self._extra_qemu_params
+ # Note: shlex.join() does what we want, but requires python 3.8+.
+ print('Running tests with:\n$', ' '.join(shlex.quote(arg) for arg in qemu_command))
+ return subprocess.Popen(qemu_command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, errors='backslashreplace')
class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
"""An abstraction over command line operations performed on a source tree."""
@@ -144,62 +139,88 @@ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
def __init__(self, cross_compile=None):
super().__init__(linux_arch='um', cross_compile=cross_compile)
- def make_allyesconfig(self, build_dir, make_options) -> None:
- kunit_parser.print_with_timestamp(
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ kconfig = kunit_config.parse_file(UML_KCONFIG_PATH)
+ kconfig.merge_in_entries(base_kunitconfig)
+ return kconfig
+
+ def make_allyesconfig(self, build_dir: str, make_options) -> None:
+ stdout.print_with_timestamp(
'Enabling all CONFIGs for UML...')
- command = ['make', 'ARCH=um', 'allyesconfig']
+ command = ['make', 'ARCH=um', 'O=' + build_dir, 'allyesconfig']
if make_options:
command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
process = subprocess.Popen(
command,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT)
process.wait()
- kunit_parser.print_with_timestamp(
+ stdout.print_with_timestamp(
'Disabling broken configs to run KUnit tests...')
with open(get_kconfig_path(build_dir), 'a') as config:
with open(BROKEN_ALLCONFIG_PATH, 'r') as disable:
config.write(disable.read())
- kunit_parser.print_with_timestamp(
+ stdout.print_with_timestamp(
'Starting Kernel with all configs takes a few minutes...')
def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
"""Runs the Linux UML binary. Must be named 'linux'."""
- linux_bin = get_file_path(build_dir, 'linux')
+ linux_bin = os.path.join(build_dir, 'linux')
+ params.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
return subprocess.Popen([linux_bin] + params,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True, errors='backslashreplace')
-def get_kconfig_path(build_dir) -> str:
- return get_file_path(build_dir, KCONFIG_PATH)
+def get_kconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, KCONFIG_PATH)
+
+def get_kunitconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, KUNITCONFIG_PATH)
+
+def get_old_kunitconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, OLD_KUNITCONFIG_PATH)
-def get_kunitconfig_path(build_dir) -> str:
- return get_file_path(build_dir, KUNITCONFIG_PATH)
+def get_parsed_kunitconfig(build_dir: str,
+ kunitconfig_paths: Optional[List[str]]=None) -> kunit_config.Kconfig:
+ if not kunitconfig_paths:
+ path = get_kunitconfig_path(build_dir)
+ if not os.path.exists(path):
+ shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, path)
+ return kunit_config.parse_file(path)
-def get_old_kunitconfig_path(build_dir) -> str:
- return get_file_path(build_dir, OLD_KUNITCONFIG_PATH)
+ merged = kunit_config.Kconfig()
-def get_outfile_path(build_dir) -> str:
- return get_file_path(build_dir, OUTFILE_PATH)
+ for path in kunitconfig_paths:
+ if os.path.isdir(path):
+ path = os.path.join(path, KUNITCONFIG_PATH)
+ if not os.path.exists(path):
+ raise ConfigError(f'Specified kunitconfig ({path}) does not exist')
-def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceTreeOperations:
+ partial = kunit_config.parse_file(path)
+ diff = merged.conflicting_options(partial)
+ if diff:
+ diff_str = '\n\n'.join(f'{a}\n vs from {path}\n{b}' for a, b in diff)
+ raise ConfigError(f'Multiple values specified for {len(diff)} options in kunitconfig:\n{diff_str}')
+ merged.merge_in_entries(partial)
+ return merged
+
+def get_outfile_path(build_dir: str) -> str:
+ return os.path.join(build_dir, OUTFILE_PATH)
+
+def _default_qemu_config_path(arch: str) -> str:
config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py')
- if arch == 'um':
- return LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
- elif os.path.isfile(config_path):
- return get_source_tree_ops_from_qemu_config(config_path, cross_compile)[1]
+ if os.path.isfile(config_path):
+ return config_path
options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')]
raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options)))
-def get_source_tree_ops_from_qemu_config(config_path: str,
- cross_compile: Optional[str]) -> Tuple[
- str, LinuxSourceTreeOperations]:
+def _get_qemu_ops(config_path: str,
+ extra_qemu_args: Optional[List[str]],
+ cross_compile: Optional[str]) -> Tuple[str, LinuxSourceTreeOperations]:
# The module name/path has very little to do with where the actual file
# exists (I learned this through experimentation and could not find it
# anywhere in the Python documentation).
@@ -219,47 +240,41 @@ def get_source_tree_ops_from_qemu_config(config_path: str,
if not hasattr(config, 'QEMU_ARCH'):
raise ValueError('qemu_config module missing "QEMU_ARCH": ' + config_path)
params: qemu_config.QemuArchParams = config.QEMU_ARCH # type: ignore
+ if extra_qemu_args:
+ params.extra_qemu_params.extend(extra_qemu_args)
return params.linux_arch, LinuxSourceTreeOperationsQemu(
params, cross_compile=cross_compile)
-class LinuxSourceTree(object):
+class LinuxSourceTree:
"""Represents a Linux kernel source tree with KUnit tests."""
def __init__(
self,
build_dir: str,
- load_config=True,
- kunitconfig_path='',
+ kunitconfig_paths: Optional[List[str]]=None,
kconfig_add: Optional[List[str]]=None,
arch=None,
cross_compile=None,
- qemu_config_path=None) -> None:
+ qemu_config_path=None,
+ extra_qemu_args=None) -> None:
signal.signal(signal.SIGINT, self.signal_handler)
if qemu_config_path:
- self._arch, self._ops = get_source_tree_ops_from_qemu_config(
- qemu_config_path, cross_compile)
+ self._arch, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile)
else:
self._arch = 'um' if arch is None else arch
- self._ops = get_source_tree_ops(self._arch, cross_compile)
-
- if not load_config:
- return
+ if self._arch == 'um':
+ self._ops = LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
+ else:
+ qemu_config_path = _default_qemu_config_path(self._arch)
+ _, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile)
- if kunitconfig_path:
- if os.path.isdir(kunitconfig_path):
- kunitconfig_path = os.path.join(kunitconfig_path, KUNITCONFIG_PATH)
- if not os.path.exists(kunitconfig_path):
- raise ConfigError(f'Specified kunitconfig ({kunitconfig_path}) does not exist')
- else:
- kunitconfig_path = get_kunitconfig_path(build_dir)
- if not os.path.exists(kunitconfig_path):
- shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, kunitconfig_path)
-
- self._kconfig = kunit_config.parse_file(kunitconfig_path)
+ self._kconfig = get_parsed_kunitconfig(build_dir, kunitconfig_paths)
if kconfig_add:
kconfig = kunit_config.parse_from_string('\n'.join(kconfig_add))
self._kconfig.merge_in_entries(kconfig)
+ def arch(self) -> str:
+ return self._arch
def clean(self) -> bool:
try:
@@ -269,27 +284,27 @@ class LinuxSourceTree(object):
return False
return True
- def validate_config(self, build_dir) -> bool:
+ def validate_config(self, build_dir: str) -> bool:
kconfig_path = get_kconfig_path(build_dir)
validated_kconfig = kunit_config.parse_file(kconfig_path)
if self._kconfig.is_subset_of(validated_kconfig):
return True
- invalid = self._kconfig.entries() - validated_kconfig.entries()
+ missing = set(self._kconfig.as_entries()) - set(validated_kconfig.as_entries())
message = 'Not all Kconfig options selected in kunitconfig were in the generated .config.\n' \
'This is probably due to unsatisfied dependencies.\n' \
- 'Missing: ' + ', '.join([str(e) for e in invalid])
+ 'Missing: ' + ', '.join(str(e) for e in missing)
if self._arch == 'um':
message += '\nNote: many Kconfig options aren\'t available on UML. You can try running ' \
'on a different architecture with something like "--arch=x86_64".'
logging.error(message)
return False
- def build_config(self, build_dir, make_options) -> bool:
+ def build_config(self, build_dir: str, make_options) -> bool:
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
- self._ops.make_arch_qemuconfig(self._kconfig)
+ self._kconfig = self._ops.make_arch_config(self._kconfig)
self._kconfig.write_to_file(kconfig_path)
self._ops.make_olddefconfig(build_dir, make_options)
except ConfigError as e:
@@ -310,9 +325,9 @@ class LinuxSourceTree(object):
return True
old_kconfig = kunit_config.parse_file(old_path)
- return old_kconfig.entries() != self._kconfig.entries()
+ return old_kconfig != self._kconfig
- def build_reconfig(self, build_dir, make_options) -> bool:
+ def build_reconfig(self, build_dir: str, make_options) -> bool:
"""Creates a new .config if it is not a subset of the .kunitconfig."""
kconfig_path = get_kconfig_path(build_dir)
if not os.path.exists(kconfig_path):
@@ -320,14 +335,15 @@ class LinuxSourceTree(object):
return self.build_config(build_dir, make_options)
existing_kconfig = kunit_config.parse_file(kconfig_path)
- self._ops.make_arch_qemuconfig(self._kconfig)
+ self._kconfig = self._ops.make_arch_config(self._kconfig)
+
if self._kconfig.is_subset_of(existing_kconfig) and not self._kunitconfig_changed(build_dir):
return True
print('Regenerating .config ...')
os.remove(kconfig_path)
return self.build_config(build_dir, make_options)
- def build_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
+ def build_kernel(self, alltests, jobs, build_dir: str, make_options) -> bool:
try:
if alltests:
self._ops.make_allyesconfig(build_dir, make_options)
@@ -341,7 +357,6 @@ class LinuxSourceTree(object):
def run_kernel(self, args=None, build_dir='', filter_glob='', timeout=None) -> Iterator[str]:
if not args:
args = []
- args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
if filter_glob:
args.append('kunit.filter_glob='+filter_glob)
@@ -375,6 +390,6 @@ class LinuxSourceTree(object):
waiter.join()
subprocess.call(['stty', 'sane'])
- def signal_handler(self, sig, frame) -> None:
+ def signal_handler(self, unused_sig, unused_frame) -> None:
logging.error('Build interruption occurred. Cleaning console.')
subprocess.call(['stty', 'sane'])
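With shell=True gone, the QEMU command is handed to Popen as an argv list and never joined for execution; shlex.quote() is used only to render a copy-pasteable command line for the log. A sketch of that rendering (format_command is a hypothetical helper; shlex.join() would be equivalent but needs Python 3.8+):

import shlex
from typing import List

def format_command(argv: List[str]) -> str:
    # Quote each argument individually so the printed line can be pasted
    # back into a shell, even when arguments contain spaces.
    return ' '.join(shlex.quote(arg) for arg in argv)

cmd = ['qemu-system-x86_64', '-nographic',
       '-append', 'console=ttyS0 kunit_shutdown=reboot']
print('Running tests with:\n$', format_command(cmd))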
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 05ff334761dd..12d3ec77f427 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -11,13 +11,14 @@
from __future__ import annotations
import re
+import sys
-import datetime
from enum import Enum, auto
-from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple
-class Test(object):
+from kunit_printer import stdout
+
+class Test:
"""
A class to represent a test parsed from KTAP results. All KTAP
results within a test log are stored in a main Test object as
@@ -45,10 +46,8 @@ class Test(object):
def __str__(self) -> str:
"""Returns string representation of a Test class object."""
- return ('Test(' + str(self.status) + ', ' + self.name +
- ', ' + str(self.expected_count) + ', ' +
- str(self.subtests) + ', ' + str(self.log) + ', ' +
- str(self.counts) + ')')
+ return (f'Test({self.status}, {self.name}, {self.expected_count}, '
+ f'{self.subtests}, {self.log}, {self.counts})')
def __repr__(self) -> str:
"""Returns string representation of a Test class object."""
@@ -57,7 +56,7 @@ class Test(object):
def add_error(self, error_message: str) -> None:
"""Records an error that occurred while parsing this test."""
self.counts.errors += 1
- print_error('Test ' + self.name + ': ' + error_message)
+ stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
class TestStatus(Enum):
"""An enumeration class to represent the status of a test."""
@@ -91,13 +90,12 @@ class TestCounts:
self.errors = 0
def __str__(self) -> str:
- """Returns the string representation of a TestCounts object.
- """
- return ('Passed: ' + str(self.passed) +
- ', Failed: ' + str(self.failed) +
- ', Crashed: ' + str(self.crashed) +
- ', Skipped: ' + str(self.skipped) +
- ', Errors: ' + str(self.errors))
+ """Returns the string representation of a TestCounts object."""
+ statuses = [('passed', self.passed), ('failed', self.failed),
+ ('crashed', self.crashed), ('skipped', self.skipped),
+ ('errors', self.errors)]
+ return f'Ran {self.total()} tests: ' + \
+ ', '.join(f'{s}: {n}' for s, n in statuses if n > 0)
def total(self) -> int:
"""Returns the total number of test cases within a test
@@ -128,31 +126,19 @@ class TestCounts:
"""
if self.total() == 0:
return TestStatus.NO_TESTS
- elif self.crashed:
- # If one of the subtests crash, the expected status
- # of the Test is crashed.
+ if self.crashed:
+ # Crashes should take priority.
return TestStatus.TEST_CRASHED
- elif self.failed:
- # Otherwise if one of the subtests fail, the
- # expected status of the Test is failed.
+ if self.failed:
return TestStatus.FAILURE
- elif self.passed:
- # Otherwise if one of the subtests pass, the
- # expected status of the Test is passed.
+ if self.passed:
+ # No failures or crashes, looks good!
return TestStatus.SUCCESS
- else:
- # Finally, if none of the subtests have failed,
- # crashed, or passed, the expected status of the
- # Test is skipped.
- return TestStatus.SKIPPED
+ # We have only skipped tests.
+ return TestStatus.SKIPPED
def add_status(self, status: TestStatus) -> None:
- """
- Increments count of inputted status.
-
- Parameters:
- status - status to be added to the TestCounts object
- """
+ """Increments the count for `status`."""
if status == TestStatus.SUCCESS:
self.passed += 1
elif status == TestStatus.FAILURE:
@@ -282,11 +268,9 @@ def check_version(version_num: int, accepted_versions: List[int],
test - Test object for current test being parsed
"""
if version_num < min(accepted_versions):
- test.add_error(version_type +
- ' version lower than expected!')
+ test.add_error(f'{version_type} version lower than expected!')
elif version_num > max(accepted_versions):
- test.add_error(
- version_type + ' version higher than expected!')
+ test.add_error(f'{version_type} version higher than expected!')
def parse_ktap_header(lines: LineStream, test: Test) -> bool:
"""
@@ -396,7 +380,7 @@ def peek_test_name_match(lines: LineStream, test: Test) -> bool:
if not match:
return False
name = match.group(4)
- return (name == test.name)
+ return name == test.name
def parse_test_result(lines: LineStream, test: Test,
expected_num: int) -> bool:
@@ -439,8 +423,7 @@ def parse_test_result(lines: LineStream, test: Test,
# Check test num
num = int(match.group(2))
if num != expected_num:
- test.add_error('Expected test number ' +
- str(expected_num) + ' but found ' + str(num))
+ test.add_error(f'Expected test number {expected_num} but found {num}')
# Set status of test object
status = match.group(1)
@@ -474,51 +457,11 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
log.append(lines.pop())
return log
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')
-
-def parse_crash_in_log(test: Test) -> bool:
- """
- Iterate through the lines of the log to parse for crash message.
- If crash message found, set status to crashed and return True.
- Otherwise return False.
-
- Parameters:
- test - Test object for current test being parsed
-
- Return:
- True if crash message found in log
- """
- for line in test.log:
- if DIAGNOSTIC_CRASH_MESSAGE.match(line):
- test.status = TestStatus.TEST_CRASHED
- return True
- return False
-
# Printing helper methods:
DIVIDER = '=' * 60
-RESET = '\033[0;0m'
-
-def red(text: str) -> str:
- """Returns inputted string with red color code."""
- return '\033[1;31m' + text + RESET
-
-def yellow(text: str) -> str:
- """Returns inputted string with yellow color code."""
- return '\033[1;33m' + text + RESET
-
-def green(text: str) -> str:
- """Returns inputted string with green color code."""
- return '\033[1;32m' + text + RESET
-
-ANSI_LEN = len(red(''))
-
-def print_with_timestamp(message: str) -> None:
- """Prints message with timestamp at beginning."""
- print('[%s] %s' % (datetime.datetime.now().strftime('%H:%M:%S'), message))
-
def format_test_divider(message: str, len_message: int) -> str:
"""
Returns string with message centered in fixed width divider.
@@ -542,7 +485,7 @@ def format_test_divider(message: str, len_message: int) -> str:
# calculate number of dashes for each side of the divider
len_1 = int(difference / 2)
len_2 = difference - len_1
- return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+ return ('=' * len_1) + f' {message} ' + ('=' * len_2)
def print_test_header(test: Test) -> None:
"""
@@ -558,22 +501,15 @@ def print_test_header(test: Test) -> None:
message = test.name
if test.expected_count:
if test.expected_count == 1:
- message += (' (' + str(test.expected_count) +
- ' subtest)')
+ message += ' (1 subtest)'
else:
- message += (' (' + str(test.expected_count) +
- ' subtests)')
- print_with_timestamp(format_test_divider(message, len(message)))
+ message += f' ({test.expected_count} subtests)'
+ stdout.print_with_timestamp(format_test_divider(message, len(message)))
def print_log(log: Iterable[str]) -> None:
- """
- Prints all strings in saved log for test in yellow.
-
- Parameters:
- log - Iterable object with all strings saved in log for test
- """
+ """Prints all strings in saved log for test in yellow."""
for m in log:
- print_with_timestamp(yellow(m))
+ stdout.print_with_timestamp(stdout.yellow(m))
def format_test_result(test: Test) -> str:
"""
@@ -590,17 +526,16 @@ def format_test_result(test: Test) -> str:
String containing formatted test result
"""
if test.status == TestStatus.SUCCESS:
- return (green('[PASSED] ') + test.name)
- elif test.status == TestStatus.SKIPPED:
- return (yellow('[SKIPPED] ') + test.name)
- elif test.status == TestStatus.NO_TESTS:
- return (yellow('[NO TESTS RUN] ') + test.name)
- elif test.status == TestStatus.TEST_CRASHED:
- print_log(test.log)
- return (red('[CRASHED] ') + test.name)
- else:
+ return stdout.green('[PASSED] ') + test.name
+ if test.status == TestStatus.SKIPPED:
+ return stdout.yellow('[SKIPPED] ') + test.name
+ if test.status == TestStatus.NO_TESTS:
+ return stdout.yellow('[NO TESTS RUN] ') + test.name
+ if test.status == TestStatus.TEST_CRASHED:
print_log(test.log)
- return (red('[FAILED] ') + test.name)
+ return stdout.red('[CRASHED] ') + test.name
+ print_log(test.log)
+ return stdout.red('[FAILED] ') + test.name
def print_test_result(test: Test) -> None:
"""
@@ -612,7 +547,7 @@ def print_test_result(test: Test) -> None:
Parameters:
test - Test object representing current test being printed
"""
- print_with_timestamp(format_test_result(test))
+ stdout.print_with_timestamp(format_test_result(test))
def print_test_footer(test: Test) -> None:
"""
@@ -625,8 +560,8 @@ def print_test_footer(test: Test) -> None:
test - Test object representing current test being printed
"""
message = format_test_result(test)
- print_with_timestamp(format_test_divider(message,
- len(message) - ANSI_LEN))
+ stdout.print_with_timestamp(format_test_divider(message,
+ len(message) - stdout.color_len()))
def print_summary_line(test: Test) -> None:
"""
@@ -643,25 +578,12 @@ def print_summary_line(test: Test) -> None:
test - Test object representing current test being printed
"""
if test.status == TestStatus.SUCCESS:
- color = green
- elif test.status == TestStatus.SKIPPED or test.status == TestStatus.NO_TESTS:
- color = yellow
+ color = stdout.green
+ elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
+ color = stdout.yellow
else:
- color = red
- counts = test.counts
- print_with_timestamp(color('Testing complete. ' + str(counts)))
-
-def print_error(error_message: str) -> None:
- """
- Prints error message with error format.
-
- Example:
- "[ERROR] Test example: missing test plan!"
-
- Parameters:
- error_message - message describing error
- """
- print_with_timestamp(red('[ERROR] ') + error_message)
+ color = stdout.red
+ stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
# Other methods:
@@ -675,7 +597,6 @@ def bubble_up_test_results(test: Test) -> None:
Parameters:
test - Test object for current test being parsed
"""
- parse_crash_in_log(test)
subtests = test.subtests
counts = test.counts
status = test.status
@@ -789,8 +710,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
# Check for there being no tests
if parent_test and len(subtests) == 0:
- test.status = TestStatus.NO_TESTS
- test.add_error('0 tests run!')
+ # Don't override a bad status if this test had one reported.
+ # Assumption: no subtests means CRASHED is from Test.__init__()
+ if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
# Add statuses to TestCounts attribute in Test object
bubble_up_test_results(test)
@@ -805,7 +729,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
def parse_run_tests(kernel_output: Iterable[str]) -> Test:
"""
Using kernel output, extract KTAP lines, parse the lines for test
- results and print condensed test results and summary line .
+ results and print condensed test results and summary line.
Parameters:
kernel_output - Iterable object contains lines of kernel output
@@ -813,16 +737,17 @@ def parse_run_tests(kernel_output: Iterable[str]) -> Test:
Return:
Test - the main test object with all subtests.
"""
- print_with_timestamp(DIVIDER)
+ stdout.print_with_timestamp(DIVIDER)
lines = extract_tap_lines(kernel_output)
test = Test()
if not lines:
- test.add_error('invalid KTAP input!')
+ test.name = '<missing>'
+ test.add_error('could not find any KTAP output!')
test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
test = parse_test(lines, 0, [])
if test.status != TestStatus.NO_TESTS:
test.status = test.counts.get_status()
- print_with_timestamp(DIVIDER)
+ stdout.print_with_timestamp(DIVIDER)
print_summary_line(test)
return test
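The rewritten TestCounts.get_status() reads as a strict priority order: crashes dominate failures, failures dominate passes, and a run containing only skips reports SKIPPED. A standalone sketch of that ordering (statuses as strings and get_status as a free function, both for illustration only):

def get_status(passed: int, failed: int, crashed: int, skipped: int) -> str:
    total = passed + failed + crashed + skipped
    if total == 0:
        return 'NO_TESTS'
    if crashed:        # Crashes take priority.
        return 'TEST_CRASHED'
    if failed:
        return 'FAILURE'
    if passed:         # No failures or crashes, looks good!
        return 'SUCCESS'
    return 'SKIPPED'   # Only skipped tests remain.

assert get_status(2, 1, 0, 0) == 'FAILURE'
assert get_status(0, 0, 0, 3) == 'SKIPPED'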
diff --git a/tools/testing/kunit/kunit_printer.py b/tools/testing/kunit/kunit_printer.py
new file mode 100644
index 000000000000..5f1cc55ecdf5
--- /dev/null
+++ b/tools/testing/kunit/kunit_printer.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Utilities for printing and coloring output.
+#
+# Copyright (C) 2022, Google LLC.
+# Author: Daniel Latypov <dlatypov@google.com>
+
+import datetime
+import sys
+import typing
+
+_RESET = '\033[0;0m'
+
+class Printer:
+ """Wraps a file object, providing utilities for coloring output, etc."""
+
+ def __init__(self, output: typing.IO):
+ self._output = output
+ self._use_color = output.isatty()
+
+ def print(self, message: str) -> None:
+ print(message, file=self._output)
+
+ def print_with_timestamp(self, message: str) -> None:
+ ts = datetime.datetime.now().strftime('%H:%M:%S')
+ self.print(f'[{ts}] {message}')
+
+ def _color(self, code: str, text: str) -> str:
+ if not self._use_color:
+ return text
+ return code + text + _RESET
+
+ def red(self, text: str) -> str:
+ return self._color('\033[1;31m', text)
+
+ def yellow(self, text: str) -> str:
+ return self._color('\033[1;33m', text)
+
+ def green(self, text: str) -> str:
+ return self._color('\033[1;32m', text)
+
+ def color_len(self) -> int:
+ """Returns the length of the color escape codes."""
+ return len(self.red(''))
+
+# Provides a default instance that prints to stdout
+stdout = Printer(sys.stdout)
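Because Printer gates color on isatty(), redirected or piped output stays free of escape codes with no extra flags. A usage sketch, assuming it runs from tools/testing/kunit/ so kunit_printer is importable:

import io
from kunit_printer import Printer, stdout

# Interactive use: colored only if sys.stdout is a terminal.
stdout.print_with_timestamp(stdout.green('[PASSED] ') + 'example_test')

# A Printer over a plain buffer never emits escape codes,
# since io.StringIO().isatty() is False.
buf = io.StringIO()
plain = Printer(buf)
plain.print(plain.red('[ERROR] something went wrong'))
assert buf.getvalue() == '[ERROR] something went wrong\n'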
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 352369dffbd9..446ac432d9a4 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -45,7 +45,7 @@ class KconfigTest(unittest.TestCase):
self.assertTrue(kconfig0.is_subset_of(kconfig0))
kconfig1 = kunit_config.Kconfig()
- kconfig1.add_entry(kunit_config.KconfigEntry('TEST', 'y'))
+ kconfig1.add_entry('TEST', 'y')
self.assertTrue(kconfig1.is_subset_of(kconfig1))
self.assertTrue(kconfig0.is_subset_of(kconfig1))
self.assertFalse(kconfig1.is_subset_of(kconfig0))
@@ -56,40 +56,28 @@ class KconfigTest(unittest.TestCase):
kconfig = kunit_config.parse_file(kconfig_path)
expected_kconfig = kunit_config.Kconfig()
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('UML', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MMU', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MK8', 'n'))
-
- self.assertEqual(kconfig.entries(), expected_kconfig.entries())
+ expected_kconfig.add_entry('UML', 'y')
+ expected_kconfig.add_entry('MMU', 'y')
+ expected_kconfig.add_entry('TEST', 'y')
+ expected_kconfig.add_entry('EXAMPLE_TEST', 'y')
+ expected_kconfig.add_entry('MK8', 'n')
+
+ self.assertEqual(kconfig, expected_kconfig)
def test_write_to_file(self):
kconfig_path = os.path.join(test_tmpdir, '.config')
expected_kconfig = kunit_config.Kconfig()
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('UML', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MMU', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MK8', 'n'))
+ expected_kconfig.add_entry('UML', 'y')
+ expected_kconfig.add_entry('MMU', 'y')
+ expected_kconfig.add_entry('TEST', 'y')
+ expected_kconfig.add_entry('EXAMPLE_TEST', 'y')
+ expected_kconfig.add_entry('MK8', 'n')
expected_kconfig.write_to_file(kconfig_path)
actual_kconfig = kunit_config.parse_file(kconfig_path)
-
- self.assertEqual(actual_kconfig.entries(),
- expected_kconfig.entries())
+ self.assertEqual(actual_kconfig, expected_kconfig)
class KUnitParserTest(unittest.TestCase):
@@ -222,23 +210,14 @@ class KUnitParserTest(unittest.TestCase):
def test_no_kunit_output(self):
crash_log = test_data_path('test_insufficient_memory.log')
- print_mock = mock.patch('builtins.print').start()
+ print_mock = mock.patch('kunit_printer.Printer.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('invalid KTAP input!'))
+ print_mock.assert_any_call(StrContains('could not find any KTAP output!'))
print_mock.stop()
self.assertEqual(0, len(result.subtests))
- def test_crashed_test(self):
- crashed_log = test_data_path('test_is_test_passed-crash.log')
- with open(crashed_log) as file:
- result = kunit_parser.parse_run_tests(
- file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.TEST_CRASHED,
- result.status)
-
def test_skipped_test(self):
skipped_log = test_data_path('test_skip_tests.log')
with open(skipped_log) as file:
@@ -260,8 +239,8 @@ class KUnitParserTest(unittest.TestCase):
def test_ignores_hyphen(self):
hyphen_log = test_data_path('test_strip_hyphen.log')
- file = open(hyphen_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ with open(hyphen_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
# A skipped test does not fail the whole suite.
self.assertEqual(
@@ -356,7 +335,7 @@ class LineStreamTest(unittest.TestCase):
called_times = 0
def generator():
nonlocal called_times
- for i in range(1,5):
+ for _ in range(1,5):
called_times += 1
yield called_times, str(called_times)
@@ -377,21 +356,53 @@ class LinuxSourceTreeTest(unittest.TestCase):
def test_invalid_kunitconfig(self):
with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'):
- kunit_kernel.LinuxSourceTree('', kunitconfig_path='/nonexistent_file')
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=['/nonexistent_file'])
def test_valid_kunitconfig(self):
with tempfile.NamedTemporaryFile('wt') as kunitconfig:
- kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[kunitconfig.name])
def test_dir_kunitconfig(self):
with tempfile.TemporaryDirectory('') as dir:
with open(os.path.join(dir, '.kunitconfig'), 'w'):
pass
- kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir])
+
+ def test_multiple_kunitconfig(self):
+ want_kconfig = kunit_config.Kconfig()
+ want_kconfig.add_entry('KUNIT', 'y')
+ want_kconfig.add_entry('KUNIT_TEST', 'm')
+
+ with tempfile.TemporaryDirectory('') as dir:
+ other = os.path.join(dir, 'otherkunitconfig')
+ with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(other, 'w') as f:
+ f.write('CONFIG_KUNIT_TEST=m')
+
+ tree = kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+ self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
+
+
+ def test_multiple_kunitconfig_invalid(self):
+ with tempfile.TemporaryDirectory('') as dir:
+ other = os.path.join(dir, 'otherkunitconfig')
+ with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(other, 'w') as f:
+ f.write('CONFIG_KUNIT=m')
+
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, '(?s)Multiple values.*CONFIG_KUNIT'):
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+
def test_kconfig_add(self):
+ want_kconfig = kunit_config.Kconfig()
+ want_kconfig.add_entry('NOT_REAL', 'y')
+
tree = kunit_kernel.LinuxSourceTree('', kconfig_add=['CONFIG_NOT_REAL=y'])
- self.assertIn(kunit_config.KconfigEntry('NOT_REAL', 'y'), tree._kconfig.entries())
+ self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
def test_invalid_arch(self):
with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'):
@@ -402,7 +413,7 @@ class LinuxSourceTreeTest(unittest.TestCase):
return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE)
with tempfile.TemporaryDirectory('') as build_dir:
- tree = kunit_kernel.LinuxSourceTree(build_dir, load_config=False)
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
mock.patch.object(tree._ops, 'start', side_effect=fake_start).start()
with self.assertRaises(ValueError):
@@ -419,6 +430,10 @@ class LinuxSourceTreeTest(unittest.TestCase):
f.write('CONFIG_KUNIT=y')
tree = kunit_kernel.LinuxSourceTree(build_dir)
+ # Stub out the source tree operations, so that the
+ # defaults for any given architecture don't get in
+ # the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
mock_build_config = mock.patch.object(tree, 'build_config').start()
# Should generate the .config
@@ -436,6 +451,10 @@ class LinuxSourceTreeTest(unittest.TestCase):
f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
tree = kunit_kernel.LinuxSourceTree(build_dir)
+ # Stub out the source tree operations, so that the
+ # defaults for any given architecture don't get in
+ # the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
mock_build_config = mock.patch.object(tree, 'build_config').start()
self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
@@ -452,6 +471,10 @@ class LinuxSourceTreeTest(unittest.TestCase):
f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
tree = kunit_kernel.LinuxSourceTree(build_dir)
+ # Stub out the source tree operations, so that the
+ # defaults for any given architecture don't get in
+ # the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
mock_build_config = mock.patch.object(tree, 'build_config').start()
# ... so we should trigger a call to build_config()
@@ -468,9 +491,7 @@ class KUnitJsonTest(unittest.TestCase):
test_result = kunit_parser.parse_run_tests(file)
json_obj = kunit_json.get_json_result(
test=test_result,
- def_config='kunit_defconfig',
- build_dir=None,
- json_path='stdout')
+ metadata=kunit_json.Metadata())
return json.loads(json_obj)
def test_failed_test_json(self):
@@ -480,10 +501,10 @@ class KUnitJsonTest(unittest.TestCase):
result["sub_groups"][1]["test_cases"][0])
def test_crashed_test_json(self):
- result = self._json_for('test_is_test_passed-crash.log')
+ result = self._json_for('test_kernel_panic_interrupt.log')
self.assertEqual(
- {'name': 'example_simple_test', 'status': 'ERROR'},
- result["sub_groups"][1]["test_cases"][0])
+ {'name': '', 'status': 'ERROR'},
+ result["sub_groups"][2]["test_cases"][1])
def test_skipped_test_json(self):
result = self._json_for('test_skip_tests.log')
@@ -511,27 +532,28 @@ class KUnitMainTest(unittest.TestCase):
with open(path) as file:
all_passed_log = file.readlines()
- self.print_mock = mock.patch('builtins.print').start()
+ self.print_mock = mock.patch('kunit_printer.Printer.print').start()
self.addCleanup(mock.patch.stopall)
- self.linux_source_mock = mock.Mock()
- self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
- self.linux_source_mock.build_kernel = mock.Mock(return_value=True)
- self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
+ self.mock_linux_init = mock.patch.object(kunit_kernel, 'LinuxSourceTree').start()
+ self.linux_source_mock = self.mock_linux_init.return_value
+ self.linux_source_mock.build_reconfig.return_value = True
+ self.linux_source_mock.build_kernel.return_value = True
+ self.linux_source_mock.run_kernel.return_value = all_passed_log
def test_config_passes_args_pass(self):
- kunit.main(['config', '--build_dir=.kunit'], self.linux_source_mock)
+ kunit.main(['config', '--build_dir=.kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_build_passes_args_pass(self):
- kunit.main(['build'], self.linux_source_mock)
+ kunit.main(['build'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.build_kernel.assert_called_once_with(False, kunit.get_default_jobs(), '.kunit', None)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_exec_passes_args_pass(self):
- kunit.main(['exec'], self.linux_source_mock)
+ kunit.main(['exec'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
@@ -539,7 +561,7 @@ class KUnitMainTest(unittest.TestCase):
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_pass(self):
- kunit.main(['run'], self.linux_source_mock)
+ kunit.main(['run'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
@@ -549,29 +571,30 @@ class KUnitMainTest(unittest.TestCase):
def test_exec_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
- kunit.main(['exec'], self.linux_source_mock)
+ kunit.main(['exec'])
self.assertEqual(e.exception.code, 1)
def test_run_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
- kunit.main(['run'], self.linux_source_mock)
+ kunit.main(['run'])
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- self.print_mock.assert_any_call(StrContains('invalid KTAP input!'))
+ self.print_mock.assert_any_call(StrContains('could not find any KTAP output!'))
def test_exec_no_tests(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0'])
with self.assertRaises(SystemExit) as e:
- kunit.main(['run'], self.linux_source_mock)
+ kunit.main(['run'])
+ self.assertEqual(e.exception.code, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['exec', '--raw_output'], self.linux_source_mock)
+ kunit.main(['exec', '--raw_output'])
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
@@ -579,7 +602,7 @@ class KUnitMainTest(unittest.TestCase):
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output'], self.linux_source_mock)
+ kunit.main(['run', '--raw_output'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
@@ -588,31 +611,37 @@ class KUnitMainTest(unittest.TestCase):
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output=kunit'], self.linux_source_mock)
+ kunit.main(['run', '--raw_output=kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ def test_run_raw_output_invalid(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ with self.assertRaises(SystemExit) as e:
+ kunit.main(['run', '--raw_output=invalid'])
+ self.assertNotEqual(e.exception.code, 0)
+
def test_run_raw_output_does_not_take_positional_args(self):
# --raw_output is a string flag, but we don't want it to consume
# any positional arguments, only ones after an '='
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ kunit.main(['run', '--raw_output', 'filter_glob'])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
def test_exec_timeout(self):
timeout = 3453
- kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
+ kunit.main(['exec', '--timeout', str(timeout)])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
timeout = 3453
- kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
+ kunit.main(['run', '--timeout', str(timeout)])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
@@ -620,7 +649,7 @@ class KUnitMainTest(unittest.TestCase):
def test_run_builddir(self):
build_dir = '.kunit'
- kunit.main(['run', '--build_dir=.kunit'], self.linux_source_mock)
+ kunit.main(['run', '--build_dir=.kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
@@ -628,60 +657,81 @@ class KUnitMainTest(unittest.TestCase):
def test_config_builddir(self):
build_dir = '.kunit'
- kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock)
+ kunit.main(['config', '--build_dir', build_dir])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
def test_build_builddir(self):
build_dir = '.kunit'
jobs = kunit.get_default_jobs()
- kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock)
+ kunit.main(['build', '--build_dir', build_dir])
self.linux_source_mock.build_kernel.assert_called_once_with(False, jobs, build_dir, None)
def test_exec_builddir(self):
build_dir = '.kunit'
- kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock)
+ kunit.main(['exec', '--build_dir', build_dir])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
- @mock.patch.object(kunit_kernel, 'LinuxSourceTree')
- def test_run_kunitconfig(self, mock_linux_init):
- mock_linux_init.return_value = self.linux_source_mock
+ def test_run_kunitconfig(self):
kunit.main(['run', '--kunitconfig=mykunitconfig'])
# Just verify that we parsed and initialized it correctly here.
- mock_linux_init.assert_called_once_with('.kunit',
- kunitconfig_path='mykunitconfig',
- kconfig_add=None,
- arch='um',
- cross_compile=None,
- qemu_config_path=None)
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=['mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_config_kunitconfig(self):
+ kunit.main(['config', '--kunitconfig=mykunitconfig'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=['mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
@mock.patch.object(kunit_kernel, 'LinuxSourceTree')
- def test_config_kunitconfig(self, mock_linux_init):
+ def test_run_multiple_kunitconfig(self, mock_linux_init):
mock_linux_init.return_value = self.linux_source_mock
- kunit.main(['config', '--kunitconfig=mykunitconfig'])
+ kunit.main(['run', '--kunitconfig=mykunitconfig', '--kunitconfig=other'])
# Just verify that we parsed and initialized it correctly here.
mock_linux_init.assert_called_once_with('.kunit',
- kunitconfig_path='mykunitconfig',
+ kunitconfig_paths=['mykunitconfig', 'other'],
kconfig_add=None,
arch='um',
cross_compile=None,
- qemu_config_path=None)
+ qemu_config_path=None,
+ extra_qemu_args=[])
- @mock.patch.object(kunit_kernel, 'LinuxSourceTree')
- def test_run_kconfig_add(self, mock_linux_init):
- mock_linux_init.return_value = self.linux_source_mock
+ def test_run_kconfig_add(self):
kunit.main(['run', '--kconfig_add=CONFIG_KASAN=y', '--kconfig_add=CONFIG_KCSAN=y'])
# Just verify that we parsed and initialized it correctly here.
- mock_linux_init.assert_called_once_with('.kunit',
- kunitconfig_path=None,
- kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'],
- arch='um',
- cross_compile=None,
- qemu_config_path=None)
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=None,
+ kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'],
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_run_qemu_args(self):
+ kunit.main(['run', '--arch=x86_64', '--qemu_args', '-m 2048'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=None,
+ kconfig_add=None,
+ arch='x86_64',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=['-m', '2048'])
def test_run_kernel_args(self):
- kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'], self.linux_source_mock)
+ kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=['a=1','b=2'], build_dir='.kunit', filter_glob='', timeout=300)
@@ -692,7 +742,7 @@ class KUnitMainTest(unittest.TestCase):
self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
got = kunit._list_tests(self.linux_source_mock,
- kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*', None, 'suite'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, False, 'suite*', None, 'suite'))
self.assertEqual(got, want)
# Should respect the user's filter glob when listing tests.
@@ -703,11 +753,11 @@ class KUnitMainTest(unittest.TestCase):
@mock.patch.object(kunit, '_list_tests')
def test_run_isolated_by_suite(self, mock_tests):
mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1']
- kunit.main(['exec', '--run_isolated=suite', 'suite*.test*'], self.linux_source_mock)
+ kunit.main(['exec', '--run_isolated=suite', 'suite*.test*'])
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
- kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*.test*', None, 'suite'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, False, 'suite*.test*', None, 'suite'))
self.linux_source_mock.run_kernel.assert_has_calls([
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300),
@@ -716,11 +766,11 @@ class KUnitMainTest(unittest.TestCase):
@mock.patch.object(kunit, '_list_tests')
def test_run_isolated_by_test(self, mock_tests):
mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1']
- kunit.main(['exec', '--run_isolated=test', 'suite*'], self.linux_source_mock)
+ kunit.main(['exec', '--run_isolated=test', 'suite*'])
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
- kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*', None, 'test'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, False, 'suite*', None, 'test'))
self.linux_source_mock.run_kernel.assert_has_calls([
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300),
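The test_run_qemu_args case above expects the single string '-m 2048' to arrive at LinuxSourceTree as the two tokens ['-m', '2048']. A minimal sketch of that splitting step, assuming shlex-style whitespace splitting (kunit.py's actual implementation may differ):

import shlex

def split_qemu_args(raw_args):
    # Each --qemu_args value may carry several space-separated tokens;
    # split them so every flag and value is its own argv entry.
    extra_qemu_args = []
    for raw in raw_args:
        extra_qemu_args.extend(shlex.split(raw))
    return extra_qemu_args

assert split_qemu_args(['-m 2048']) == ['-m', '2048']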
diff --git a/tools/testing/kunit/qemu_config.py b/tools/testing/kunit/qemu_config.py
index 1672f6184e95..0b6a80398ccc 100644
--- a/tools/testing/kunit/qemu_config.py
+++ b/tools/testing/kunit/qemu_config.py
@@ -5,12 +5,15 @@
# Copyright (C) 2021, Google LLC.
# Author: Brendan Higgins <brendanhiggins@google.com>
-from collections import namedtuple
+from dataclasses import dataclass
+from typing import List
-QemuArchParams = namedtuple('QemuArchParams', ['linux_arch',
- 'kconfig',
- 'qemu_arch',
- 'kernel_path',
- 'kernel_command_line',
- 'extra_qemu_params'])
+@dataclass(frozen=True)
+class QemuArchParams:
+ linux_arch: str
+ kconfig: str
+ qemu_arch: str
+ kernel_path: str
+ kernel_command_line: str
+ extra_qemu_params: List[str]
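Converting QemuArchParams from a namedtuple to a frozen dataclass keeps keyword construction identical while adding type annotations and immutability. A minimal usage sketch given the class above; the import path, the qemu-system-<arch> binary naming, and the argv layout are illustrative assumptions, not kunit_kernel's actual command builder:

import dataclasses
from qemu_config import QemuArchParams  # import path illustrative

params = QemuArchParams(linux_arch='x86_64',
                        kconfig='CONFIG_SERIAL_8250=y',
                        qemu_arch='x86_64',
                        kernel_path='arch/x86/boot/bzImage',
                        kernel_command_line='console=ttyS0',
                        extra_qemu_params=['-m', '1024'])

# extra_qemu_params is now List[str], one token per element, so it can be
# appended directly to an argv list:
argv = ['qemu-system-' + params.qemu_arch,
        '-kernel', params.kernel_path,
        '-append', params.kernel_command_line] + params.extra_qemu_params

try:
    params.qemu_arch = 'arm'  # frozen=True makes instances read-only
except dataclasses.FrozenInstanceError:
    pass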
diff --git a/tools/testing/kunit/qemu_configs/alpha.py b/tools/testing/kunit/qemu_configs/alpha.py
index 5d0c0cff03bd..3ac846e03a6b 100644
--- a/tools/testing/kunit/qemu_configs/alpha.py
+++ b/tools/testing/kunit/qemu_configs/alpha.py
@@ -7,4 +7,4 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='alpha',
kernel_path='arch/alpha/boot/vmlinux',
kernel_command_line='console=ttyS0',
- extra_qemu_params=[''])
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/qemu_configs/arm.py b/tools/testing/kunit/qemu_configs/arm.py
index b9c2a35e0296..db2160200566 100644
--- a/tools/testing/kunit/qemu_configs/arm.py
+++ b/tools/testing/kunit/qemu_configs/arm.py
@@ -10,4 +10,4 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
qemu_arch='arm',
kernel_path='arch/arm/boot/zImage',
kernel_command_line='console=ttyAMA0',
- extra_qemu_params=['-machine virt'])
+ extra_qemu_params=['-machine', 'virt'])
diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py
index 517c04459f47..67d04064f785 100644
--- a/tools/testing/kunit/qemu_configs/arm64.py
+++ b/tools/testing/kunit/qemu_configs/arm64.py
@@ -9,4 +9,4 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
qemu_arch='aarch64',
kernel_path='arch/arm64/boot/Image.gz',
kernel_command_line='console=ttyAMA0',
- extra_qemu_params=['-machine virt', '-cpu cortex-a57'])
+ extra_qemu_params=['-machine', 'virt', '-cpu', 'cortex-a57'])
diff --git a/tools/testing/kunit/qemu_configs/i386.py b/tools/testing/kunit/qemu_configs/i386.py
index aed3ffd3937d..4463ebefd567 100644
--- a/tools/testing/kunit/qemu_configs/i386.py
+++ b/tools/testing/kunit/qemu_configs/i386.py
@@ -4,7 +4,7 @@ QEMU_ARCH = QemuArchParams(linux_arch='i386',
kconfig='''
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y''',
- qemu_arch='x86_64',
+ qemu_arch='i386',
kernel_path='arch/x86/boot/bzImage',
kernel_command_line='console=ttyS0',
- extra_qemu_params=[''])
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py
index 35e9de24f0db..7ec38d4131f7 100644
--- a/tools/testing/kunit/qemu_configs/powerpc.py
+++ b/tools/testing/kunit/qemu_configs/powerpc.py
@@ -9,4 +9,4 @@ CONFIG_HVC_CONSOLE=y''',
qemu_arch='ppc64',
kernel_path='vmlinux',
kernel_command_line='console=ttyS0',
- extra_qemu_params=['-M pseries', '-cpu power8'])
+ extra_qemu_params=['-M', 'pseries', '-cpu', 'power8'])
diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py
index 9e528087cd7c..6207be146d26 100644
--- a/tools/testing/kunit/qemu_configs/riscv.py
+++ b/tools/testing/kunit/qemu_configs/riscv.py
@@ -21,11 +21,12 @@ CONFIG_SOC_VIRT=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_RISCV_SBI_V01=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''',
qemu_arch='riscv64',
kernel_path='arch/riscv/boot/Image',
kernel_command_line='console=ttyS0',
extra_qemu_params=[
- '-machine virt',
- '-cpu rv64',
- '-bios opensbi-riscv64-generic-fw_dynamic.bin'])
+ '-machine', 'virt',
+ '-cpu', 'rv64',
+ '-bios', 'opensbi-riscv64-generic-fw_dynamic.bin'])
diff --git a/tools/testing/kunit/qemu_configs/s390.py b/tools/testing/kunit/qemu_configs/s390.py
index e310bd521113..98fa4fb60c0a 100644
--- a/tools/testing/kunit/qemu_configs/s390.py
+++ b/tools/testing/kunit/qemu_configs/s390.py
@@ -10,5 +10,5 @@ CONFIG_MODULES=y''',
kernel_path='arch/s390/boot/bzImage',
kernel_command_line='console=ttyS0',
extra_qemu_params=[
- '-machine s390-ccw-virtio',
- '-cpu qemu',])
+ '-machine', 's390-ccw-virtio',
+ '-cpu', 'qemu',])
diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py
index 27f474e7ad6e..e975c4331a7c 100644
--- a/tools/testing/kunit/qemu_configs/sparc.py
+++ b/tools/testing/kunit/qemu_configs/sparc.py
@@ -7,4 +7,4 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='sparc',
kernel_path='arch/sparc/boot/zImage',
kernel_command_line='console=ttyS0 mem=256M',
- extra_qemu_params=['-m 256'])
+ extra_qemu_params=['-m', '256'])
diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py
index 77ab1aeee8a3..dc7949076863 100644
--- a/tools/testing/kunit/qemu_configs/x86_64.py
+++ b/tools/testing/kunit/qemu_configs/x86_64.py
@@ -7,4 +7,4 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='x86_64',
kernel_path='arch/x86/boot/bzImage',
kernel_command_line='console=ttyS0',
- extra_qemu_params=[''])
+ extra_qemu_params=[])
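All the per-arch config updates above follow the same pattern: '-machine virt' becomes '-machine', 'virt', and placeholder [''] entries become []. That matters once the parameters are handed to the qemu process as an argv list rather than joined into a shell string (presumably how the runner now invokes qemu): each list element is exactly one argv entry, so an embedded space or an empty string passes through literally. A small demonstration using a Python subprocess in place of qemu:

import subprocess
import sys

# Print the received argv to show how list elements map onto arguments.
show = [sys.executable, '-c', 'import sys; print(sys.argv[1:])']

subprocess.run(show + ['-machine virt'])     # ['-machine virt']  - one entry
subprocess.run(show + ['-machine', 'virt'])  # ['-machine', 'virt'] - two entries
subprocess.run(show + [''])                  # [''] - an empty argument, not nothing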
diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py
index 13d854afca9d..066e6f938f6d 100755
--- a/tools/testing/kunit/run_checks.py
+++ b/tools/testing/kunit/run_checks.py
@@ -14,7 +14,7 @@ import shutil
import subprocess
import sys
import textwrap
-from typing import Dict, List, Sequence, Tuple
+from typing import Dict, List, Sequence
ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
TIMEOUT = datetime.timedelta(minutes=5).total_seconds()
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-crash.log b/tools/testing/kunit/test_data/test_is_test_passed-crash.log
deleted file mode 100644
index 4d97f6708c4a..000000000000
--- a/tools/testing/kunit/test_data/test_is_test_passed-crash.log
+++ /dev/null
@@ -1,70 +0,0 @@
-printk: console [tty0] enabled
-printk: console [mc-1] enabled
-TAP version 14
-1..2
- # Subtest: sysctl_test
- 1..8
- # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
- ok 1 - sysctl_test_dointvec_null_tbl_data
- # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
- ok 2 - sysctl_test_dointvec_table_maxlen_unset
- # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
- ok 3 - sysctl_test_dointvec_table_len_is_zero
- # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
- ok 4 - sysctl_test_dointvec_table_read_but_position_set
- # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
- ok 5 - sysctl_test_dointvec_happy_single_positive
- # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
- ok 6 - sysctl_test_dointvec_happy_single_negative
- # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
- ok 7 - sysctl_test_dointvec_single_less_int_min
- # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
- ok 8 - sysctl_test_dointvec_single_greater_int_max
-kunit sysctl_test: all tests passed
-ok 1 - sysctl_test
- # Subtest: example
- 1..2
-init_suite
- # example_simple_test: initializing
-Stack:
- 6016f7db 6f81bd30 6f81bdd0 60021450
- 6024b0e8 60021440 60018bbe 16f81bdc0
- 00000001 6f81bd30 6f81bd20 6f81bdd0
-Call Trace:
- [<6016f7db>] ? kunit_try_run_case+0xab/0xf0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bbe>] ? kunit_um_run_try_catch+0x5e/0xc0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bb3>] ? kunit_um_run_try_catch+0x53/0xc0
- [<6016f321>] ? kunit_run_case_catch_errors+0x121/0x1a0
- [<60018b60>] ? kunit_um_run_try_catch+0x0/0xc0
- [<600189e0>] ? kunit_um_throw+0x0/0x180
- [<6016f730>] ? kunit_try_run_case+0x0/0xf0
- [<6016f600>] ? kunit_catch_run_case+0x0/0x130
- [<6016edd0>] ? kunit_vprintk+0x0/0x30
- [<6016ece0>] ? kunit_fail+0x0/0x40
- [<6016eca0>] ? kunit_abort+0x0/0x40
- [<6016ed20>] ? kunit_printk_emit+0x0/0xb0
- [<6016f200>] ? kunit_run_case_catch_errors+0x0/0x1a0
- [<6016f46e>] ? kunit_run_tests+0xce/0x260
- [<6005b390>] ? unregister_console+0x0/0x190
- [<60175b70>] ? suite_kunit_initexample_test_suite+0x0/0x20
- [<60001cbb>] ? do_one_initcall+0x0/0x197
- [<60001d47>] ? do_one_initcall+0x8c/0x197
- [<6005cd20>] ? irq_to_desc+0x0/0x30
- [<60002005>] ? kernel_init_freeable+0x1b3/0x272
- [<6005c5ec>] ? printk+0x0/0x9b
- [<601c0086>] ? kernel_init+0x26/0x160
- [<60014442>] ? new_thread_handler+0x82/0xc0
-
- # example_simple_test: kunit test case crashed!
- # example_simple_test: example_simple_test failed
- not ok 1 - example_simple_test
- # example_mock_test: initializing
- # example_mock_test: example_mock_test passed
- ok 2 - example_mock_test
-kunit example: one or more tests failed
-not ok 2 - example
-List of all partitions:
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
index dd873c981108..4f81876ee6f1 100644
--- a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
@@ -3,5 +3,5 @@ TAP version 14
# Subtest: suite
1..1
# Subtest: case
- ok 1 - case # SKIP
+ ok 1 - case
ok 1 - suite
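The test-data tweak above removes the '# SKIP' directive, turning a skipped result into a plain pass. In TAP, a trailing '# SKIP' on an 'ok' line changes its meaning, so a parser must treat the two forms differently. An illustrative matcher (not kunit_parser's actual grammar):

import re

OK_LINE = re.compile(r'^\s*ok \d+ - .*?(?P<skip> # SKIP.*)?$')

def classify(line):
    m = OK_LINE.match(line)
    if not m:
        return 'not a passing line'
    return 'skipped' if m.group('skip') else 'passed'

assert classify('  ok 1 - case # SKIP') == 'skipped'
assert classify('  ok 1 - case') == 'passed'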
diff --git a/tools/testing/memblock/TODO b/tools/testing/memblock/TODO
index c25b2fdec45e..cd1a30d5acc9 100644
--- a/tools/testing/memblock/TODO
+++ b/tools/testing/memblock/TODO
@@ -23,6 +23,3 @@ TODO
5. Add tests for memblock_alloc_node() to check if the correct NUMA node is set
for the new region
-
-6. Update comments in tests/basic_api.c to match the style used in
- tests/alloc_*.c
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index fbc1ce160303..a7bc180316d6 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -26,8 +26,8 @@ static int memblock_initialization_check(void)
/*
* A simple test that adds a memory block of a specified base address
* and size to the collection of available memory regions (memblock.memory).
- * It checks if a new entry was created and if region counter and total memory
- * were correctly updated.
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
*/
static int memblock_add_simple_check(void)
{
@@ -53,10 +53,10 @@ static int memblock_add_simple_check(void)
}
/*
- * A simple test that adds a memory block of a specified base address, size
+ * A simple test that adds a memory block of a specified base address, size,
* NUMA node and memory flags to the collection of available memory regions.
- * It checks if the new entry, region counter and total memory size have
- * expected values.
+ * Expect to create a new entry. The region counter and total memory get
+ * updated.
*/
static int memblock_add_node_simple_check(void)
{
@@ -87,9 +87,15 @@ static int memblock_add_node_simple_check(void)
/*
* A test that tries to add two memory blocks that don't overlap with one
- * another. It checks if two correctly initialized entries were added to the
- * collection of available memory regions (memblock.memory) and if this
- * change was reflected in memblock.memory's total size and region counter.
+ * another:
+ *
+ * | +--------+ +--------+ |
+ * | | r1 | | r2 | |
+ * +--------+--------+--------+--------+--+
+ *
+ * Expect to add two correctly initialized entries to the collection of
+ * available memory regions (memblock.memory). The total size and
+ * region counter fields get updated.
*/
static int memblock_add_disjoint_check(void)
{
@@ -124,11 +130,21 @@ static int memblock_add_disjoint_check(void)
}
/*
- * A test that tries to add two memory blocks, where the second one overlaps
- * with the beginning of the first entry (that is r1.base < r2.base + r2.size).
- * After this, it checks if two entries are merged into one region that starts
- * at r2.base and has size of two regions minus their intersection. It also
- * verifies the reported total size of the available memory and region counter.
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the beginning of r1 (that is r1.base < r2.base + r2.size):
+ *
+ * | +----+----+------------+ |
+ * | | |r2 | r1 | |
+ * +----+----+----+------------+----------+
+ * ^ ^
+ * | |
+ * | r1.base
+ * |
+ * r2.base
+ *
+ * Expect to merge the two entries into one region that starts at r2.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
*/
static int memblock_add_overlap_top_check(void)
{
@@ -162,12 +178,21 @@ static int memblock_add_overlap_top_check(void)
}
/*
- * A test that tries to add two memory blocks, where the second one overlaps
- * with the end of the first entry (that is r2.base < r1.base + r1.size).
- * After this, it checks if two entries are merged into one region that starts
- * at r1.base and has size of two regions minus their intersection. It verifies
- * that memblock can still see only one entry and has a correct total size of
- * the available memory.
+ * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
+ * with the end of r1 (that is r2.base < r1.base + r1.size):
+ *
+ * | +--+------+----------+ |
+ * | | | r1 | r2 | |
+ * +--+--+------+----------+--------------+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge the two entries into one region that starts at r1.base
+ * and has size of two regions minus their intersection. The total size of
+ * the available memory is updated, and the region counter stays the same.
*/
static int memblock_add_overlap_bottom_check(void)
{
@@ -201,11 +226,19 @@ static int memblock_add_overlap_bottom_check(void)
}
/*
- * A test that tries to add two memory blocks, where the second one is
- * within the range of the first entry (that is r1.base < r2.base &&
- * r2.base + r2.size < r1.base + r1.size). It checks if two entries are merged
- * into one region that stays the same. The counter and total size of available
- * memory are expected to not be updated.
+ * A test that tries to add two memory blocks r1 and r2, where r2 is
+ * within the range of r1 (that is r1.base < r2.base &&
+ * r2.base + r2.size < r1.base + r1.size):
+ *
+ * | +-------+--+-----------------------+
+ * | | |r2| r1 |
+ * +---+-------+--+-----------------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect to merge two entries into one region that stays the same.
+ * The counter and total size of available memory are not updated.
*/
static int memblock_add_within_check(void)
{
@@ -236,8 +269,8 @@ static int memblock_add_within_check(void)
}
/*
- * A simple test that tries to add the same memory block twice. The counter
- * and total size of available memory are expected to not be updated.
+ * A simple test that tries to add the same memory block twice. Expect
+ * the counter and total size of available memory to not be updated.
*/
static int memblock_add_twice_check(void)
{
@@ -270,12 +303,12 @@ static int memblock_add_checks(void)
return 0;
}
- /*
- * A simple test that marks a memory block of a specified base address
- * and size as reserved and to the collection of reserved memory regions
- * (memblock.reserved). It checks if a new entry was created and if region
- * counter and total memory size were correctly updated.
- */
+/*
+ * A simple test that marks a memory block of a specified base address
+ * and size as reserved and to the collection of reserved memory regions
+ * (memblock.reserved). Expect to create a new entry. The region counter
+ * and total memory size are updated.
+ */
static int memblock_reserve_simple_check(void)
{
struct memblock_region *rgn;
@@ -297,10 +330,15 @@ static int memblock_reserve_simple_check(void)
}
/*
- * A test that tries to mark two memory blocks that don't overlap as reserved
- * and checks if two entries were correctly added to the collection of reserved
- * memory regions (memblock.reserved) and if this change was reflected in
- * memblock.reserved's total size and region counter.
+ * A test that tries to mark two memory blocks that don't overlap as reserved:
+ *
+ * | +--+ +----------------+ |
+ * | |r1| | r2 | |
+ * +--------+--+------+----------------+--+
+ *
+ * Expect to add two entries to the collection of reserved memory regions
+ * (memblock.reserved). The total size and region counter for
+ * memblock.reserved are updated.
*/
static int memblock_reserve_disjoint_check(void)
{
@@ -335,13 +373,22 @@ static int memblock_reserve_disjoint_check(void)
}
/*
- * A test that tries to mark two memory blocks as reserved, where the
- * second one overlaps with the beginning of the first (that is
- * r1.base < r2.base + r2.size).
- * It checks if two entries are merged into one region that starts at r2.base
- * and has size of two regions minus their intersection. The test also verifies
- * that memblock can still see only one entry and has a correct total size of
- * the reserved memory.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the beginning of r1 (that is
+ * r1.base < r2.base + r2.size):
+ *
+ * | +--------------+--+--------------+ |
+ * | | r2 | | r1 | |
+ * +--+--------------+--+--------------+--+
+ * ^ ^
+ * | |
+ * | r1.base
+ * |
+ * r2.base
+ *
+ * Expect to merge two entries into one region that starts at r2.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
*/
static int memblock_reserve_overlap_top_check(void)
{
@@ -375,13 +422,22 @@ static int memblock_reserve_overlap_top_check(void)
}
/*
- * A test that tries to mark two memory blocks as reserved, where the
- * second one overlaps with the end of the first entry (that is
- * r2.base < r1.base + r1.size).
- * It checks if two entries are merged into one region that starts at r1.base
- * and has size of two regions minus their intersection. It verifies that
- * memblock can still see only one entry and has a correct total size of the
- * reserved memory.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 overlaps with the end of r1 (that is
+ * r2.base < r1.base + r1.size):
+ *
+ * | +--------------+--+--------------+ |
+ * | | r1 | | r2 | |
+ * +--+--------------+--+--------------+--+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge two entries into one region that starts at r1.base and
+ * has size of two regions minus their intersection. The total size of the
+ * reserved memory is updated, and the region counter is not updated.
*/
static int memblock_reserve_overlap_bottom_check(void)
{
@@ -415,12 +471,21 @@ static int memblock_reserve_overlap_bottom_check(void)
}
/*
- * A test that tries to mark two memory blocks as reserved, where the second
- * one is within the range of the first entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if two entries are merged into one region that stays the
- * same. The counter and total size of available memory are expected to not be
- * updated.
+ * A test that tries to mark two memory blocks r1 and r2 as reserved,
+ * where r2 is within the range of r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * | +-----+--+---------------------------|
+ * | | |r2| r1 |
+ * +-+-----+--+---------------------------+
+ * ^ ^
+ * | |
+ * | r2.base
+ * |
+ * r1.base
+ *
+ * Expect to merge two entries into one region that stays the same. The
+ * counter and total size of available memory are not updated.
*/
static int memblock_reserve_within_check(void)
{
@@ -452,7 +517,7 @@ static int memblock_reserve_within_check(void)
/*
* A simple test that tries to reserve the same memory block twice.
- * The region counter and total size of reserved memory are expected to not
+ * Expect the region counter and total size of reserved memory to not
* be updated.
*/
static int memblock_reserve_twice_check(void)
@@ -485,14 +550,22 @@ static int memblock_reserve_checks(void)
return 0;
}
- /*
- * A simple test that tries to remove the first entry of the array of
- * available memory regions. By "removing" a region we mean overwriting it
- * with the next region in memblock.memory. To check this is the case, the
- * test adds two memory blocks and verifies that the value of the latter
- * was used to erase r1 region. It also checks if the region counter and
- * total size were updated to expected values.
- */
+/*
+ * A simple test that tries to remove a region r1 from the array of
+ * available memory regions. By "removing" a region we mean overwriting it
+ * with the next region r2 in memblock.memory:
+ *
+ * | ...... +----------------+ |
+ * | : r1 : | r2 | |
+ * +--+----+----------+----------------+--+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect to add two memory blocks r1 and r2 and then remove r1 so that
+ * r2 is the first available region. The region counter and total size
+ * are updated.
+ */
static int memblock_remove_simple_check(void)
{
struct memblock_region *rgn;
@@ -522,11 +595,22 @@ static int memblock_remove_simple_check(void)
return 0;
}
- /*
- * A test that tries to remove a region that was not registered as available
- * memory (i.e. has no corresponding entry in memblock.memory). It verifies
- * that array, regions counter and total size were not modified.
- */
+/*
+ * A test that tries to remove a region r2 that was not registered as
+ * available memory (i.e. has no corresponding entry in memblock.memory):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +----+ |
+ * | | r1 | |
+ * +--+----+------------------------------+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect the array, regions counter and total size to not be modified.
+ */
static int memblock_remove_absent_check(void)
{
struct memblock_region *rgn;
@@ -556,11 +640,23 @@ static int memblock_remove_absent_check(void)
}
/*
- * A test that tries to remove a region which overlaps with the beginning of
- * the already existing entry r1 (that is r1.base < r2.base + r2.size). It
- * checks if only the intersection of both regions is removed from the available
- * memory pool. The test also checks if the regions counter and total size are
- * updated to expected values.
+ * A test that tries to remove a region r2 that overlaps with the
+ * beginning of the already existing entry r1
+ * (that is r1.base < r2.base + r2.size):
+ *
+ * +-----------------+
+ * | r2 |
+ * +-----------------+
+ * | .........+--------+ |
+ * | : r1 | rgn | |
+ * +-----------------+--------+--------+--+
+ * ^ ^
+ * | |
+ * | rgn.base
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
*/
static int memblock_remove_overlap_top_check(void)
{
@@ -596,11 +692,21 @@ static int memblock_remove_overlap_top_check(void)
}
/*
- * A test that tries to remove a region which overlaps with the end of the
- * first entry (that is r2.base < r1.base + r1.size). It checks if only the
- * intersection of both regions is removed from the available memory pool.
- * The test also checks if the regions counter and total size are updated to
- * expected values.
+ * A test that tries to remove a region r2 that overlaps with the end of
+ * the already existing region r1 (that is r2.base < r1.base + r1.size):
+ *
+ * +--------------------------------+
+ * | r2 |
+ * +--------------------------------+
+ * | +---+..... |
+ * | |rgn| r1 : |
+ * +-+---+----+---------------------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is removed from the
+ * available memory pool. The regions counter and total size are updated.
*/
static int memblock_remove_overlap_bottom_check(void)
{
@@ -633,13 +739,23 @@ static int memblock_remove_overlap_bottom_check(void)
}
/*
- * A test that tries to remove a region which is within the range of the
- * already existing entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if the region is split into two - one that ends at r2.base and
- * second that starts at r2.base + size, with appropriate sizes. The test
- * also checks if the region counter and total size were updated to
- * expected values.
+ * A test that tries to remove a region r2 that is within the range of
+ * the already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | +-------------+....+---------------+ |
+ * | | rgn1 | r1 | rgn2 | |
+ * +-+-------------+----+---------------+-+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size are updated.
*/
static int memblock_remove_within_check(void)
{
@@ -690,12 +806,19 @@ static int memblock_remove_checks(void)
}
/*
- * A simple test that tries to free a memory block that was marked earlier
- * as reserved. By "freeing" a region we mean overwriting it with the next
- * entry in memblock.reserved. To check this is the case, the test reserves
- * two memory regions and verifies that the value of the latter was used to
- * erase r1 region.
- * The test also checks if the region counter and total size were updated.
+ * A simple test that tries to free a memory block r1 that was marked
+ * earlier as reserved. By "freeing" a region we mean overwriting it with
+ * the next entry r2 in memblock.reserved:
+ *
+ * | ...... +----+ |
+ * | : r1 : | r2 | |
+ * +--------------+----+-----------+----+-+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect to reserve two memory regions and then erase the r1 region with
+ * the value of r2. The region counter and total size are updated.
*/
static int memblock_free_simple_check(void)
{
@@ -726,11 +849,22 @@ static int memblock_free_simple_check(void)
return 0;
}
- /*
- * A test that tries to free a region that was not marked as reserved
- * (i.e. has no corresponding entry in memblock.reserved). It verifies
- * that array, regions counter and total size were not modified.
- */
+/*
+ * A test that tries to free a region r2 that was not marked as reserved
+ * (i.e. has no corresponding entry in memblock.reserved):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +----+ |
+ * | | r1 | |
+ * +--+----+------------------------------+
+ * ^
+ * |
+ * rgn.base
+ *
+ * Expect the array, regions counter and total size to not be modified.
+ */
static int memblock_free_absent_check(void)
{
struct memblock_region *rgn;
@@ -760,11 +894,23 @@ static int memblock_free_absent_check(void)
}
/*
- * A test that tries to free a region which overlaps with the beginning of
- * the already existing entry r1 (that is r1.base < r2.base + r2.size). It
- * checks if only the intersection of both regions is freed. The test also
- * checks if the regions counter and total size are updated to expected
- * values.
+ * A test that tries to free a region r2 that overlaps with the beginning
+ * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | ...+--------------+ |
+ * | : | r1 | |
+ * +----+--+--------------+---------------+
+ * ^ ^
+ * | |
+ * | rgn.base
+ * |
+ * r1.base
+ *
+ * Expect that only the intersection of both regions is freed. The
+ * regions counter and total size are updated.
*/
static int memblock_free_overlap_top_check(void)
{
@@ -798,10 +944,18 @@ static int memblock_free_overlap_top_check(void)
}
/*
- * A test that tries to free a region which overlaps with the end of the
- * first entry (that is r2.base < r1.base + r1.size). It checks if only the
- * intersection of both regions is freed. The test also checks if the
- * regions counter and total size are updated to expected values.
+ * A test that tries to free a region r2 that overlaps with the end of
+ * the already existing entry r1 (that is r2.base < r1.base + r1.size):
+ *
+ * +----------------+
+ * | r2 |
+ * +----------------+
+ * | +-----------+..... |
+ * | | r1 | : |
+ * +----+-----------+----+----------------+
+ *
+ * Expect that only the intersection of both regions is freed. The
+ * regions counter and total size are updated.
*/
static int memblock_free_overlap_bottom_check(void)
{
@@ -835,13 +989,23 @@ static int memblock_free_overlap_bottom_check(void)
}
/*
- * A test that tries to free a region which is within the range of the
- * already existing entry (that is
- * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)).
- * It checks if the region is split into two - one that ends at r2.base and
- * second that starts at r2.base + size, with appropriate sizes. It is
- * expected that the region counter and total size fields were updated t
- * reflect that change.
+ * A test that tries to free a region r2 that is within the range of the
+ * already existing entry r1 (that is
+ * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
+ *
+ * +----+
+ * | r2 |
+ * +----+
+ * | +------------+....+---------------+
+ * | | rgn1 | r1 | rgn2 |
+ * +----+------------+----+---------------+
+ * ^
+ * |
+ * r1.base
+ *
+ * Expect that the region is split into two - one that ends at r2.base and
+ * another that starts at r2.base + r2.size, with appropriate sizes. The
+ * region counter and total size fields are updated.
*/
static int memblock_free_within_check(void)
{
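The rewritten comments above all describe the same interval rule: overlapping regions merge into one region spanning min(base) to max(end), i.e. the sum of the two sizes minus their intersection, while a fully contained region changes nothing. A toy model of that documented expectation (illustrative only, not the kernel's memblock implementation):

def merge(r1, r2):
    # Regions are (base, size) pairs covering [base, base + size).
    b1, s1 = r1
    b2, s2 = r2
    assert max(b1, b2) < min(b1 + s1, b2 + s2), 'regions must overlap'
    base = min(b1, b2)
    end = max(b1 + s1, b2 + s2)
    return (base, end - base)

# r2 overlaps the beginning of r1: the merged region starts at r2.base
# and spans 50 + 40 - 20 = 70 (the two sizes minus their intersection).
assert merge((100, 50), (80, 40)) == (80, 70)
# r2 lies within r1: the region stays the same.
assert merge((100, 50), (110, 10)) == (100, 50)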
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index af19c85558e7..c1ec099a3b1d 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -4,11 +4,13 @@
*/
#include "test/nfit_test.h"
#include <linux/blkdev.h>
+#include <linux/dax.h>
#include <pmem.h>
#include <nd.h>
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index b752ce47ead3..ea956082e6a4 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -62,16 +62,14 @@ struct nfit_test_resource *get_nfit_res(resource_size_t resource)
}
EXPORT_SYMBOL(get_nfit_res);
-static void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
- void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
-{
- struct nfit_test_resource *nfit_res = get_nfit_res(offset);
-
- if (nfit_res)
- return (void __iomem *) nfit_res->buf + offset
- - nfit_res->res.start;
- return fallback_fn(offset, size);
-}
+#define __nfit_test_ioremap(offset, size, fallback_fn) ({ \
+ struct nfit_test_resource *nfit_res = get_nfit_res(offset); \
+ nfit_res ? \
+ (void __iomem *) nfit_res->buf + (offset) \
+ - nfit_res->res.start \
+ : \
+ fallback_fn((offset), (size)) ; \
+})
void __iomem *__wrap_devm_ioremap(struct device *dev,
resource_size_t offset, unsigned long size)
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 1da76ccde448..c75abb497a1a 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -23,8 +23,6 @@
#include "nfit_test.h"
#include "../watermark.h"
-#include <asm/mce.h>
-
/*
* Generate an NFIT table to describe the following topology:
*
@@ -3375,7 +3373,6 @@ static __exit void nfit_test_exit(void)
{
int i;
- flush_workqueue(nfit_wq);
destroy_workqueue(nfit_wq);
for (i = 0; i < NUM_NFITS; i++)
platform_device_unregister(&instances[i]->pdev);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 2319ec87f53d..5047d8eef53e 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -9,7 +9,9 @@ TARGETS += clone3
TARGETS += core
TARGETS += cpufreq
TARGETS += cpu-hotplug
+TARGETS += damon
TARGETS += drivers/dma-buf
+TARGETS += drivers/s390x/uvdevice
TARGETS += efivarfs
TARGETS += exec
TARGETS += filesystems
@@ -52,6 +54,7 @@ TARGETS += proc
TARGETS += pstore
TARGETS += ptrace
TARGETS += openat2
+TARGETS += resctrl
TARGETS += rlimits
TARGETS += rseq
TARGETS += rtc
@@ -140,7 +143,6 @@ endif
# Prepare for headers install
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
-export KSFT_KHDR_INSTALL_DONE := 1
export BUILD
export KHDR_INCLUDES
@@ -148,30 +150,7 @@ export KHDR_INCLUDES
# all isn't the first target in the file.
.DEFAULT_GOAL := all
-# Install headers here once for all tests. KSFT_KHDR_INSTALL_DONE
-# is used to avoid running headers_install from lib.mk.
-# Invoke headers install with --no-builtin-rules to avoid circular
-# dependency in "make kselftest" case. In this case, second level
-# make inherits builtin-rules which will use the rule generate
-# Makefile.o and runs into
-# "Circular Makefile.o <- prepare dependency dropped."
-# and headers_install fails and test compile fails.
-#
-# O= KBUILD_OUTPUT cases don't run into this error, since main Makefile
-# invokes them as sub-makes and --no-builtin-rules is not necessary,
-# but doesn't cause any failures. Keep it simple and use the same
-# flags in both cases.
-# Local build cases: "make kselftest", "make -C" - headers are installed
-# in the default INSTALL_HDR_PATH usr/include.
-khdr:
-ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
- $(MAKE) --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
-else
- $(MAKE) --no-builtin-rules INSTALL_HDR_PATH=$(abs_objtree)/usr \
- ARCH=$(ARCH) -C $(top_srcdir) headers_install
-endif
-
-all: khdr
+all:
@ret=1; \
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
@@ -250,7 +229,7 @@ ifdef INSTALL_PATH
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
[ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
- echo -n "Emit Tests for $$TARGET\n"; \
+ echo -ne "Emit Tests for $$TARGET\n"; \
$(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET COLLECTION=$$TARGET \
-C $$TARGET emit_tests >> $(TEST_LIST); \
done;
@@ -271,4 +250,4 @@ clean:
$(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean;\
done;
-.PHONY: khdr all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean gen_tar
+.PHONY: all run_tests hotplug run_hotplug clean_hotplug run_pstore_crash install clean gen_tar
diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
index f64d9090426d..fd8ddce2b1a6 100644
--- a/tools/testing/selftests/alsa/Makefile
+++ b/tools/testing/selftests/alsa/Makefile
@@ -3,6 +3,9 @@
CFLAGS += $(shell pkg-config --cflags alsa)
LDLIBS += $(shell pkg-config --libs alsa)
+ifeq ($(LDLIBS),)
+LDLIBS += -lasound
+endif
TEST_GEN_PROGS := mixer-test
diff --git a/tools/testing/selftests/alsa/mixer-test.c b/tools/testing/selftests/alsa/mixer-test.c
index eb2213540fe3..a38b89c28030 100644
--- a/tools/testing/selftests/alsa/mixer-test.c
+++ b/tools/testing/selftests/alsa/mixer-test.c
@@ -27,7 +27,7 @@
#include "../kselftest.h"
-#define TESTS_PER_CONTROL 6
+#define TESTS_PER_CONTROL 7
struct card_data {
snd_ctl_t *handle;
@@ -456,6 +456,44 @@ out:
ctl->card->card, ctl->elem);
}
+static bool strend(const char *haystack, const char *needle)
+{
+ size_t haystack_len = strlen(haystack);
+ size_t needle_len = strlen(needle);
+
+ if (needle_len > haystack_len)
+ return false;
+ return strcmp(haystack + haystack_len - needle_len, needle) == 0;
+}
+
+static void test_ctl_name(struct ctl_data *ctl)
+{
+ bool name_ok = true;
+ bool check;
+
+ /* Only boolean controls should end in Switch */
+ if (strend(ctl->name, " Switch")) {
+ if (snd_ctl_elem_info_get_type(ctl->info) != SND_CTL_ELEM_TYPE_BOOLEAN) {
+ ksft_print_msg("%d.%d %s ends in Switch but is not boolean\n",
+ ctl->card->card, ctl->elem, ctl->name);
+ name_ok = false;
+ }
+ }
+
+ /* Writeable boolean controls should end in Switch */
+ if (snd_ctl_elem_info_get_type(ctl->info) == SND_CTL_ELEM_TYPE_BOOLEAN &&
+ snd_ctl_elem_info_is_writable(ctl->info)) {
+ if (!strend(ctl->name, " Switch")) {
+ ksft_print_msg("%d.%d %s is a writeable boolean but not a Switch\n",
+ ctl->card->card, ctl->elem, ctl->name);
+ name_ok = false;
+ }
+ }
+
+ ksft_test_result(name_ok, "name.%d.%d\n",
+ ctl->card->card, ctl->elem);
+}
+
static bool show_mismatch(struct ctl_data *ctl, int index,
snd_ctl_elem_value_t *read_val,
snd_ctl_elem_value_t *expected_val)
@@ -1062,6 +1100,7 @@ int main(void)
* test stores the default value for later cleanup.
*/
test_ctl_get_value(ctl);
+ test_ctl_name(ctl);
test_ctl_write_default(ctl);
test_ctl_write_valid(ctl);
test_ctl_write_invalid(ctl);
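The test_ctl_name() added above encodes a two-way naming rule: a ' Switch' suffix implies a boolean control, and a writeable boolean control implies a ' Switch' suffix. The same predicate in a few lines of Python, with the element-info queries reduced to plain booleans for illustration:

def name_ok(name, is_boolean, is_writable):
    # Only boolean controls may end in ' Switch'.
    if name.endswith(' Switch') and not is_boolean:
        return False
    # Writeable boolean controls must end in ' Switch'.
    if is_boolean and is_writable and not name.endswith(' Switch'):
        return False
    return True

assert not name_ok('Master Volume Switch', is_boolean=False, is_writable=True)
assert not name_ok('Headphone Mute', is_boolean=True, is_writable=True)
assert name_ok('Headphone Switch', is_boolean=True, is_writable=True)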
diff --git a/tools/testing/selftests/arm64/mte/Makefile b/tools/testing/selftests/arm64/mte/Makefile
index 409e3e53d00a..a5a0744423d8 100644
--- a/tools/testing/selftests/arm64/mte/Makefile
+++ b/tools/testing/selftests/arm64/mte/Makefile
@@ -22,7 +22,6 @@ ifeq ($(mte_cc_support),1)
TEST_GEN_PROGS := $(PROGS)
# Get Kernel headers installed and use them.
-KSFT_KHDR_INSTALL := 1
else
$(warning compiler "$(CC)" does not support the ARMv8.5 MTE extension.)
$(warning test program "mte" will not be created.)
diff --git a/tools/testing/selftests/arm64/signal/Makefile b/tools/testing/selftests/arm64/signal/Makefile
index ac4ad0005715..be7520a863b0 100644
--- a/tools/testing/selftests/arm64/signal/Makefile
+++ b/tools/testing/selftests/arm64/signal/Makefile
@@ -11,7 +11,6 @@ PROGS := $(patsubst %.c,%,$(SRCS))
TEST_GEN_PROGS := $(notdir $(PROGS))
# Get Kernel headers installed and use them.
-KSFT_KHDR_INSTALL := 1
# Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list
# to account for any OUTPUT target-dirs optionally provided by
diff --git a/tools/testing/selftests/arm64/signal/test_signals.h b/tools/testing/selftests/arm64/signal/test_signals.h
index c70fdec7d7c4..0c645834ddc3 100644
--- a/tools/testing/selftests/arm64/signal/test_signals.h
+++ b/tools/testing/selftests/arm64/signal/test_signals.h
@@ -9,9 +9,7 @@
#include <ucontext.h>
/*
- * Using ARCH specific and sanitized Kernel headers installed by KSFT
- * framework since we asked for it by setting flag KSFT_KHDR_INSTALL
- * in our Makefile.
+ * Using ARCH specific and sanitized Kernel headers from the tree.
*/
#include <asm/ptrace.h>
#include <asm/hwcap.h>
diff --git a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
index bb50b5adbf10..915821375b0a 100644
--- a/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
+++ b/tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c
@@ -6,6 +6,7 @@
* supported and is expected to segfault.
*/
+#include <kselftest.h>
#include <signal.h>
#include <ucontext.h>
#include <sys/prctl.h>
@@ -40,6 +41,7 @@ static bool sve_get_vls(struct tdescr *td)
/* We need at least two VLs */
if (nvls < 2) {
fprintf(stderr, "Only %d VL supported\n", nvls);
+ td->result = KSFT_SKIP;
return false;
}
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 595565eb68c0..3a8cb2404ea6 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -41,5 +41,6 @@ test_cpp
/bench
*.ko
*.tmp
-xdpxceiver
+xskxceiver
xdp_redirect_multi
+xdp_synproxy
diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST
new file mode 100644
index 000000000000..939de574fc7f
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST
@@ -0,0 +1,6 @@
+# TEMPORARY
+get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge
+stacktrace_build_id_nmi
+stacktrace_build_id
+task_fd_query_rawtp
+varlen
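Each DENYLIST line is a test name optionally followed by a '#' comment, and blank or comment-only lines are ignored. A sketch of the parse the test runner presumably performs (the helper is hypothetical, written for illustration):

def parse_denylist(text):
    # Return the set of denied test names; '#' starts a comment.
    denied = set()
    for line in text.splitlines():
        name = line.split('#', 1)[0].strip()
        if name:
            denied.add(name)
    return denied

denied = parse_denylist('# TEMPORARY\nget_stack_raw_tp  # spams warnings\nvarlen\n')
assert denied == {'get_stack_raw_tp', 'varlen'}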
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
new file mode 100644
index 000000000000..e33cab34d22f
--- /dev/null
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -0,0 +1,67 @@
+# TEMPORARY
+atomics # attach(add): actual -524 <= expected 0 (trampoline)
+bpf_iter_setsockopt # JIT does not support calling kernel function (kfunc)
+bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
+bpf_tcp_ca # JIT does not support calling kernel function (kfunc)
+bpf_loop # attaches to __x64_sys_nanosleep
+bpf_mod_race # BPF trampoline
+bpf_nf # JIT does not support calling kernel function
+core_read_macros # unknown func bpf_probe_read#4 (overlapping)
+d_path # failed to auto-attach program 'prog_stat': -524 (trampoline)
+dummy_st_ops # test_run unexpected error: -524 (errno 524) (trampoline)
+fentry_fexit # fentry attach failed: -524 (trampoline)
+fentry_test # fentry_first_attach unexpected error: -524 (trampoline)
+fexit_bpf2bpf # freplace_attach_trace unexpected error: -524 (trampoline)
+fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
+fexit_stress # fexit attach failed prog 0 failed: -524 (trampoline)
+fexit_test # fexit_first_attach unexpected error: -524 (trampoline)
+get_func_args_test # trampoline
+get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (trampoline)
+get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
+kfree_skb # attach fentry unexpected error: -524 (trampoline)
+kfunc_call # 'bpf_prog_active': not found in kernel BTF (?)
+ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
+ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
+ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
+modify_return # modify_return attach failed: -524 (trampoline)
+module_attach # skel_attach skeleton attach failed: -524 (trampoline)
+mptcp
+kprobe_multi_test # relies on fentry
+netcnt # failed to load BPF skeleton 'netcnt_prog': -7 (?)
+probe_user # check_kprobe_res wrong kprobe res from probe read (?)
+recursion # skel_attach unexpected error: -524 (trampoline)
+ringbuf # skel_load skeleton load failed (?)
+sk_assign # Can't read on server: Invalid argument (?)
+sk_lookup # endianness problem
+sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (trampoline)
+skc_to_unix_sock # could not attach BPF object unexpected error: -524 (trampoline)
+socket_cookie # prog_attach unexpected error: -524 (trampoline)
+stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
+tailcalls # tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls (?)
+task_local_storage # failed to auto-attach program 'trace_exit_creds': -524 (trampoline)
+test_bpffs # bpffs test failed 255 (iterator)
+test_bprm_opts # failed to auto-attach program 'secure_exec': -524 (trampoline)
+test_ima # failed to auto-attach program 'ima': -524 (trampoline)
+test_local_storage # failed to auto-attach program 'unlink_hook': -524 (trampoline)
+test_lsm # failed to find kernel BTF type ID of '__x64_sys_setdomainname': -3 (?)
+test_overhead # attach_fentry unexpected error: -524 (trampoline)
+test_profiler # unknown func bpf_probe_read_str#45 (overlapping)
+timer # failed to auto-attach program 'test1': -524 (trampoline)
+timer_crash # trampoline
+timer_mim # failed to auto-attach program 'test1': -524 (trampoline)
+trace_ext # failed to auto-attach program 'test_pkt_md_access_new': -524 (trampoline)
+trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
+trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
+trampoline_count # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22 (trampoline)
+verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
+vmlinux # failed to auto-attach program 'handle__fentry': -524 (trampoline)
+xdp_adjust_tail # case-128 err 0 errno 28 retval 1 size 128 expect-size 3520 (?)
+xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
+xdp_bpf2bpf # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
+map_kptr # failed to open_and_load program: -524 (trampoline)
+bpf_cookie # failed to open_and_load program: -524 (trampoline)
+xdp_do_redirect # prog_run_max_size unexpected error: -22 (errno 22)
+send_signal # intermittently fails to receive signal
+select_reuseport # intermittently fails on new s390x setup
+xdp_synproxy # JIT does not support calling kernel function (kfunc)
+unpriv_bpf_disabled # fentry
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 3820608faf57..8d59ec7f4c2d 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -75,14 +75,14 @@ TEST_PROGS := test_kmod.sh \
test_xsk.sh
TEST_PROGS_EXTENDED := with_addr.sh \
- with_tunnels.sh \
+ with_tunnels.sh ima_setup.sh \
test_xdp_vlan.sh test_bpftool.py
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
- xdpxceiver xdp_redirect_multi
+ xskxceiver xdp_redirect_multi xdp_synproxy
TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read
@@ -168,9 +168,26 @@ $(OUTPUT)/%:%.c
$(call msg,BINARY,,$@)
$(Q)$(LINK.c) $^ $(LDLIBS) -o $@
-$(OUTPUT)/urandom_read: urandom_read.c
+# LLVM's ld.lld doesn't support all the architectures, so use it only on x86
+ifeq ($(SRCARCH),x86)
+LLD := lld
+else
+LLD := ld
+endif
+
+# Filter out -static for liburandom_read.so and its dependent targets so that static builds
+# do not fail. Static builds leave urandom_read relying on system-wide shared libraries.
+$(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
+ $(call msg,LIB,,$@)
+ $(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $^ $(LDLIBS) \
+ -fuse-ld=$(LLD) -Wl,-znoseparate-code -fPIC -shared -o $@
+
+$(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
$(call msg,BINARY,,$@)
- $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $< $(LDLIBS) -Wl,--build-id=sha1 -o $@
+ $(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
+ liburandom_read.so $(LDLIBS) \
+ -fuse-ld=$(LLD) -Wl,-znoseparate-code \
+ -Wl,-rpath=. -Wl,--build-id=sha1 -o $@
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
$(call msg,MOD,,$@)
@@ -213,6 +230,8 @@ $(OUTPUT)/xdping: $(TESTING_HELPERS)
$(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS)
$(OUTPUT)/test_maps: $(TESTING_HELPERS)
$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS)
+$(OUTPUT)/xsk.o: $(BPFOBJ)
+$(OUTPUT)/xskxceiver: $(OUTPUT)/xsk.o
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
@@ -328,12 +347,8 @@ SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h \
- test_subskeleton.skel.h test_subskeleton_lib.skel.h
-
-# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
-# but that's created as a side-effect of the skel.h generation.
-test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
-test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
+ test_subskeleton.skel.h test_subskeleton_lib.skel.h \
+ test_usdt.skel.h
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
@@ -346,6 +361,11 @@ test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
linked_funcs.skel.h-deps := linked_funcs1.o linked_funcs2.o
linked_vars.skel.h-deps := linked_vars1.o linked_vars2.o
linked_maps.skel.h-deps := linked_maps1.o linked_maps2.o
+# In the subskeleton case, we want the test_subskeleton_lib.subskel.h file
+# but that's created as a side-effect of the skel.h generation.
+test_subskeleton.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o test_subskeleton.o
+test_subskeleton_lib.skel.h-deps := test_subskeleton_lib2.o test_subskeleton_lib.o
+test_usdt.skel.h-deps := test_usdt.o test_usdt_multispec.o
LINKED_BPF_SRCS := $(patsubst %.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(skel)-deps)))
@@ -400,6 +420,7 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
$(wildcard $(BPFDIR)/bpf_*.h) \
+ $(wildcard $(BPFDIR)/*.bpf.h) \
| $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS))
@@ -415,11 +436,11 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
- $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$<
- $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
- $(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
- $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
- $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=_lskel)) > $$@
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked1.o) $$<
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked2.o) $$(<:.o=.llinked1.o)
+ $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o)
+ $(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o)
+ $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.o=_lskel)) > $$@
$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o))
@@ -491,6 +512,8 @@ TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
btf_helpers.c flow_dissector_load.h \
cap_helpers.c
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
+ $(OUTPUT)/liburandom_read.so \
+ $(OUTPUT)/xdp_synproxy \
ima_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
@@ -549,6 +572,9 @@ $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
$(OUTPUT)/bench_bloom_filter_map.o: $(OUTPUT)/bloom_filter_bench.skel.h
$(OUTPUT)/bench_bpf_loop.o: $(OUTPUT)/bpf_loop_bench.skel.h
$(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h
+$(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h
+$(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h
+$(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h
$(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ)
$(OUTPUT)/bench: LDLIBS += -lm
$(OUTPUT)/bench: $(OUTPUT)/bench.o \
@@ -560,13 +586,18 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
$(OUTPUT)/bench_ringbufs.o \
$(OUTPUT)/bench_bloom_filter_map.o \
$(OUTPUT)/bench_bpf_loop.o \
- $(OUTPUT)/bench_strncmp.o
+ $(OUTPUT)/bench_strncmp.o \
+ $(OUTPUT)/bench_bpf_hashmap_full_update.o \
+ $(OUTPUT)/bench_local_storage.o \
+ $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o
$(call msg,BINARY,,$@)
$(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature bpftool \
- $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h no_alu32 bpf_gcc bpf_testmod.ko)
+ $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \
+ no_alu32 bpf_gcc bpf_testmod.ko \
+ liburandom_read.so)
.PHONY: docs docs-clean
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index f973320e6dbf..c1f20a147462 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -8,7 +8,6 @@
#include <fcntl.h>
#include <pthread.h>
#include <sys/sysinfo.h>
-#include <sys/resource.h>
#include <signal.h>
#include "bench.h"
#include "testing_helpers.h"
@@ -80,6 +79,43 @@ void hits_drops_report_progress(int iter, struct bench_res *res, long delta_ns)
hits_per_sec, hits_per_prod, drops_per_sec, hits_per_sec + drops_per_sec);
}
+void
+grace_period_latency_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
+{
+ int i;
+
+ memset(gp_stat, 0, sizeof(struct basic_stats));
+
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->mean += res[i].gp_ns / 1000.0 / (double)res[i].gp_ct / (0.0 + res_cnt);
+
+#define IT_MEAN_DIFF (res[i].gp_ns / 1000.0 / (double)res[i].gp_ct - gp_stat->mean)
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
+ }
+ gp_stat->stddev = sqrt(gp_stat->stddev);
+#undef IT_MEAN_DIFF
+}
+
+void
+grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt, struct basic_stats *gp_stat)
+{
+ int i;
+
+ memset(gp_stat, 0, sizeof(struct basic_stats));
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->mean += res[i].stime / (double)res[i].gp_ct / (0.0 + res_cnt);
+
+#define IT_MEAN_DIFF (res[i].stime / (double)res[i].gp_ct - gp_stat->mean)
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++)
+ gp_stat->stddev += (IT_MEAN_DIFF * IT_MEAN_DIFF) / (res_cnt - 1.0);
+ }
+ gp_stat->stddev = sqrt(gp_stat->stddev);
+#undef IT_MEAN_DIFF
+}
+
void hits_drops_report_final(struct bench_res res[], int res_cnt)
{
int i;
@@ -151,6 +187,53 @@ void ops_report_final(struct bench_res res[], int res_cnt)
printf("latency %8.3lf ns/op\n", 1000.0 / hits_mean * env.producer_cnt);
}
+void local_storage_report_progress(int iter, struct bench_res *res,
+ long delta_ns)
+{
+ double important_hits_per_sec, hits_per_sec;
+ double delta_sec = delta_ns / 1000000000.0;
+
+ hits_per_sec = res->hits / 1000000.0 / delta_sec;
+ important_hits_per_sec = res->important_hits / 1000000.0 / delta_sec;
+
+ printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
+
+ printf("hits %8.3lfM/s ", hits_per_sec);
+ printf("important_hits %8.3lfM/s\n", important_hits_per_sec);
+}
+
+void local_storage_report_final(struct bench_res res[], int res_cnt)
+{
+ double important_hits_mean = 0.0, important_hits_stddev = 0.0;
+ double hits_mean = 0.0, hits_stddev = 0.0;
+ int i;
+
+ for (i = 0; i < res_cnt; i++) {
+ hits_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt);
+ important_hits_mean += res[i].important_hits / 1000000.0 / (0.0 + res_cnt);
+ }
+
+ if (res_cnt > 1) {
+ for (i = 0; i < res_cnt; i++) {
+ hits_stddev += (hits_mean - res[i].hits / 1000000.0) *
+ (hits_mean - res[i].hits / 1000000.0) /
+ (res_cnt - 1.0);
+ important_hits_stddev +=
+ (important_hits_mean - res[i].important_hits / 1000000.0) *
+ (important_hits_mean - res[i].important_hits / 1000000.0) /
+ (res_cnt - 1.0);
+ }
+
+ hits_stddev = sqrt(hits_stddev);
+ important_hits_stddev = sqrt(important_hits_stddev);
+ }
+ printf("Summary: hits throughput %8.3lf \u00B1 %5.3lf M ops/s, ",
+ hits_mean, hits_stddev);
+ printf("hits latency %8.3lf ns/op, ", 1000.0 / hits_mean);
+ printf("important_hits throughput %8.3lf \u00B1 %5.3lf M ops/s\n",
+ important_hits_mean, important_hits_stddev);
+}
+
const char *argp_program_version = "benchmark";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
const char argp_program_doc[] =
@@ -189,13 +272,18 @@ static const struct argp_option opts[] = {
extern struct argp bench_ringbufs_argp;
extern struct argp bench_bloom_map_argp;
extern struct argp bench_bpf_loop_argp;
+extern struct argp bench_local_storage_argp;
+extern struct argp bench_local_storage_rcu_tasks_trace_argp;
extern struct argp bench_strncmp_argp;
static const struct argp_child bench_parsers[] = {
{ &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 },
{ &bench_bloom_map_argp, 0, "Bloom filter map benchmark", 0 },
{ &bench_bpf_loop_argp, 0, "bpf_loop helper benchmark", 0 },
+ { &bench_local_storage_argp, 0, "local_storage benchmark", 0 },
{ &bench_strncmp_argp, 0, "bpf_strncmp helper benchmark", 0 },
+ { &bench_local_storage_rcu_tasks_trace_argp, 0,
+ "local_storage RCU Tasks Trace slowdown benchmark", 0 },
{},
};
@@ -397,6 +485,11 @@ extern const struct bench bench_hashmap_with_bloom;
extern const struct bench bench_bpf_loop;
extern const struct bench bench_strncmp_no_helper;
extern const struct bench bench_strncmp_helper;
+extern const struct bench bench_bpf_hashmap_full_update;
+extern const struct bench bench_local_storage_cache_seq_get;
+extern const struct bench bench_local_storage_cache_interleaved_get;
+extern const struct bench bench_local_storage_cache_hashmap_control;
+extern const struct bench bench_local_storage_tasks_trace;
static const struct bench *benchs[] = {
&bench_count_global,
@@ -431,6 +524,11 @@ static const struct bench *benchs[] = {
&bench_bpf_loop,
&bench_strncmp_no_helper,
&bench_strncmp_helper,
+ &bench_bpf_hashmap_full_update,
+ &bench_local_storage_cache_seq_get,
+ &bench_local_storage_cache_interleaved_get,
+ &bench_local_storage_cache_hashmap_control,
+ &bench_local_storage_tasks_trace,
};
static void setup_benchmark()
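
A note on the statistics helpers added above: they follow the standard two-pass pattern, accumulating the mean first and then a sample standard deviation with the N-1 (Bessel) denominator, guarded for the single-result case. A minimal standalone sketch of the same computation, with hypothetical sample values in place of bench_res data (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double samples[] = { 12.1, 11.8, 12.5, 12.0 };	/* hypothetical */
	int i, n = sizeof(samples) / sizeof(samples[0]);
	double mean = 0.0, stddev = 0.0;

	/* pass 1: accumulate the mean */
	for (i = 0; i < n; i++)
		mean += samples[i] / n;

	/* pass 2: sample variance with the N-1 correction, guarded for
	 * n == 1 just like the helpers above
	 */
	if (n > 1)
		for (i = 0; i < n; i++)
			stddev += (samples[i] - mean) * (samples[i] - mean) / (n - 1.0);
	stddev = sqrt(stddev);

	printf("mean %.3lf stddev %.3lf\n", mean, stddev);
	return 0;
}
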
diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h
index fb3e213df3dc..d748255877e2 100644
--- a/tools/testing/selftests/bpf/bench.h
+++ b/tools/testing/selftests/bpf/bench.h
@@ -30,10 +30,19 @@ struct env {
struct cpu_set cons_cpus;
};
+struct basic_stats {
+ double mean;
+ double stddev;
+};
+
struct bench_res {
long hits;
long drops;
long false_hits;
+ long important_hits;
+ unsigned long gp_ns;
+ unsigned long gp_ct;
+ unsigned int stime;
};
struct bench {
@@ -61,6 +70,13 @@ void false_hits_report_progress(int iter, struct bench_res *res, long delta_ns);
void false_hits_report_final(struct bench_res res[], int res_cnt);
void ops_report_progress(int iter, struct bench_res *res, long delta_ns);
void ops_report_final(struct bench_res res[], int res_cnt);
+void local_storage_report_progress(int iter, struct bench_res *res,
+ long delta_ns);
+void local_storage_report_final(struct bench_res res[], int res_cnt);
+void grace_period_latency_basic_stats(struct bench_res res[], int res_cnt,
+ struct basic_stats *gp_stat);
+void grace_period_ticks_basic_stats(struct bench_res res[], int res_cnt,
+ struct basic_stats *gp_stat);
static inline __u64 get_time_ns(void)
{
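
The new bench_res fields are filled by each benchmark's measure() callback, which snapshots and resets counters shared with the producer threads. A minimal sketch of that snapshot-and-reset pattern using the GCC/Clang builtin; bench.h's atomic_swap helper is assumed here to wrap the same builtin:

#include <stdio.h>

static long hits;	/* bumped by producer threads in a real bench */

/* sketch: atomically read-and-zero a counter so each measure() interval
 * reports only the events that occurred since the previous interval
 */
static long snapshot_and_reset(long *ctr)
{
	return __atomic_exchange_n(ctr, 0, __ATOMIC_RELAXED);
}

int main(void)
{
	hits = 42;
	printf("delta this interval: %ld\n", snapshot_and_reset(&hits));
	return 0;
}
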
diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
new file mode 100644
index 000000000000..cec51e0ff4b8
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include <argp.h>
+#include "bench.h"
+#include "bpf_hashmap_full_update_bench.skel.h"
+#include "bpf_util.h"
+
+/* BPF triggering benchmarks */
+static struct ctx {
+ struct bpf_hashmap_full_update_bench *skel;
+} ctx;
+
+#define MAX_LOOP_NUM 10000
+
+static void validate(void)
+{
+ if (env.consumer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ exit(1);
+ }
+}
+
+static void *producer(void *input)
+{
+ while (true) {
+ /* trigger the bpf program */
+ syscall(__NR_getpgid);
+ }
+
+ return NULL;
+}
+
+static void *consumer(void *input)
+{
+ return NULL;
+}
+
+static void measure(struct bench_res *res)
+{
+}
+
+static void setup(void)
+{
+ struct bpf_link *link;
+ int map_fd, i, max_entries;
+
+ setup_libbpf();
+
+ ctx.skel = bpf_hashmap_full_update_bench__open_and_load();
+ if (!ctx.skel) {
+ fprintf(stderr, "failed to open skeleton\n");
+ exit(1);
+ }
+
+ ctx.skel->bss->nr_loops = MAX_LOOP_NUM;
+
+ link = bpf_program__attach(ctx.skel->progs.benchmark);
+ if (!link) {
+ fprintf(stderr, "failed to attach program!\n");
+ exit(1);
+ }
+
+ /* fill hash_map */
+ map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
+ max_entries = bpf_map__max_entries(ctx.skel->maps.hash_map_bench);
+ for (i = 0; i < max_entries; i++)
+ bpf_map_update_elem(map_fd, &i, &i, BPF_ANY);
+}
+
+void hashmap_report_final(struct bench_res res[], int res_cnt)
+{
+ unsigned int nr_cpus = bpf_num_possible_cpus();
+ int i;
+
+ for (i = 0; i < nr_cpus; i++) {
+ u64 time = ctx.skel->bss->percpu_time[i];
+
+ if (!time)
+ continue;
+
+ printf("%d:hash_map_full_perf %lld events per sec\n",
+ i, ctx.skel->bss->nr_loops * 1000000000ll / time);
+ }
+}
+
+const struct bench bench_bpf_hashmap_full_update = {
+ .name = "bpf-hashmap-ful-update",
+ .validate = validate,
+ .setup = setup,
+ .producer_thread = producer,
+ .consumer_thread = consumer,
+ .measure = measure,
+ .report_progress = NULL,
+ .report_final = hashmap_report_final,
+};
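
hashmap_report_final() above turns each per-CPU nanosecond total for nr_loops full-map update passes into an events-per-second rate. A tiny sketch of just that unit conversion, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	long long nr_loops = 10000;	/* MAX_LOOP_NUM above */
	long long time_ns = 25000000;	/* hypothetical per-CPU elapsed time */

	/* rate = events / seconds = nr_loops / (time_ns / 1e9) */
	printf("%lld events per sec\n", nr_loops * 1000000000ll / time_ns);
	return 0;
}
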
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage.c b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
new file mode 100644
index 000000000000..5a378c84e81f
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <argp.h>
+#include <linux/btf.h>
+
+#include "local_storage_bench.skel.h"
+#include "bench.h"
+
+#include <test_btf.h>
+
+static struct {
+ __u32 nr_maps;
+ __u32 hashmap_nr_keys_used;
+} args = {
+ .nr_maps = 1000,
+ .hashmap_nr_keys_used = 1000,
+};
+
+enum {
+ ARG_NR_MAPS = 6000,
+ ARG_HASHMAP_NR_KEYS_USED = 6001,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_maps", ARG_NR_MAPS, "NR_MAPS", 0,
+ "Set number of local_storage maps"},
+ { "hashmap_nr_keys_used", ARG_HASHMAP_NR_KEYS_USED, "NR_KEYS",
+ 0, "When doing hashmap test, set number of hashmap keys test uses"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_NR_MAPS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid nr_maps");
+ argp_usage(state);
+ }
+ args.nr_maps = ret;
+ break;
+ case ARG_HASHMAP_NR_KEYS_USED:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid hashmap_nr_keys_used");
+ argp_usage(state);
+ }
+ args.hashmap_nr_keys_used = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_local_storage_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+/* Keep in sync w/ array of maps in bpf */
+#define MAX_NR_MAPS 1000
+/* keep in sync w/ same define in bpf */
+#define HASHMAP_SZ 4194304
+
+static void validate(void)
+{
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-producer!\n");
+ exit(1);
+ }
+ if (env.consumer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ exit(1);
+ }
+
+ if (args.nr_maps > MAX_NR_MAPS) {
+ fprintf(stderr, "nr_maps must be <= 1000\n");
+ exit(1);
+ }
+
+ if (args.hashmap_nr_keys_used > HASHMAP_SZ) {
+ fprintf(stderr, "hashmap_nr_keys_used must be <= %u\n", HASHMAP_SZ);
+ exit(1);
+ }
+}
+
+static struct {
+ struct local_storage_bench *skel;
+ void *bpf_obj;
+ struct bpf_map *array_of_maps;
+} ctx;
+
+static void prepopulate_hashmap(int fd)
+{
+ int i, key, val;
+
+ /* local_storage gets will have BPF_LOCAL_STORAGE_GET_F_CREATE flag set, so
+ * populate the hashmap for a similar comparison
+ */
+ for (i = 0; i < HASHMAP_SZ; i++) {
+ key = val = i;
+ if (bpf_map_update_elem(fd, &key, &val, 0)) {
+ fprintf(stderr, "Error prepopulating hashmap (key %d)\n", key);
+ exit(1);
+ }
+ }
+}
+
+static void __setup(struct bpf_program *prog, bool hashmap)
+{
+ struct bpf_map *inner_map;
+ int i, fd, mim_fd, err;
+
+ LIBBPF_OPTS(bpf_map_create_opts, create_opts);
+
+ if (!hashmap)
+ create_opts.map_flags = BPF_F_NO_PREALLOC;
+
+ ctx.skel->rodata->num_maps = args.nr_maps;
+ ctx.skel->rodata->hashmap_num_keys = args.hashmap_nr_keys_used;
+ inner_map = bpf_map__inner_map(ctx.array_of_maps);
+ create_opts.btf_key_type_id = bpf_map__btf_key_type_id(inner_map);
+ create_opts.btf_value_type_id = bpf_map__btf_value_type_id(inner_map);
+
+ err = local_storage_bench__load(ctx.skel);
+ if (err) {
+ fprintf(stderr, "Error loading skeleton\n");
+ goto err_out;
+ }
+
+ create_opts.btf_fd = bpf_object__btf_fd(ctx.skel->obj);
+
+ mim_fd = bpf_map__fd(ctx.array_of_maps);
+ if (mim_fd < 0) {
+ fprintf(stderr, "Error getting map_in_map fd\n");
+ goto err_out;
+ }
+
+ for (i = 0; i < args.nr_maps; i++) {
+ if (hashmap)
+ fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
+ sizeof(int), HASHMAP_SZ, &create_opts);
+ else
+ fd = bpf_map_create(BPF_MAP_TYPE_TASK_STORAGE, NULL, sizeof(int),
+ sizeof(int), 0, &create_opts);
+ if (fd < 0) {
+ fprintf(stderr, "Error creating map %d: %d\n", i, fd);
+ goto err_out;
+ }
+
+ if (hashmap)
+ prepopulate_hashmap(fd);
+
+ err = bpf_map_update_elem(mim_fd, &i, &fd, 0);
+ if (err) {
+ fprintf(stderr, "Error updating array-of-maps w/ map %d\n", i);
+ goto err_out;
+ }
+ }
+
+ if (!bpf_program__attach(prog)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ return;
+err_out:
+ exit(1);
+}
+
+static void hashmap_setup(void)
+{
+ struct local_storage_bench *skel;
+
+ setup_libbpf();
+
+ skel = local_storage_bench__open();
+ ctx.skel = skel;
+ ctx.array_of_maps = skel->maps.array_of_hash_maps;
+ skel->rodata->use_hashmap = 1;
+ skel->rodata->interleave = 0;
+
+ __setup(skel->progs.get_local, true);
+}
+
+static void local_storage_cache_get_setup(void)
+{
+ struct local_storage_bench *skel;
+
+ setup_libbpf();
+
+ skel = local_storage_bench__open();
+ ctx.skel = skel;
+ ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
+ skel->rodata->use_hashmap = 0;
+ skel->rodata->interleave = 0;
+
+ __setup(skel->progs.get_local, false);
+}
+
+static void local_storage_cache_get_interleaved_setup(void)
+{
+ struct local_storage_bench *skel;
+
+ setup_libbpf();
+
+ skel = local_storage_bench__open();
+ ctx.skel = skel;
+ ctx.array_of_maps = skel->maps.array_of_local_storage_maps;
+ skel->rodata->use_hashmap = 0;
+ skel->rodata->interleave = 1;
+
+ __setup(skel->progs.get_local, false);
+}
+
+static void measure(struct bench_res *res)
+{
+ res->hits = atomic_swap(&ctx.skel->bss->hits, 0);
+ res->important_hits = atomic_swap(&ctx.skel->bss->important_hits, 0);
+}
+
+static inline void trigger_bpf_program(void)
+{
+ syscall(__NR_getpgid);
+}
+
+static void *consumer(void *input)
+{
+ return NULL;
+}
+
+static void *producer(void *input)
+{
+ while (true)
+ trigger_bpf_program();
+
+ return NULL;
+}
+
+/* The cache sequential and interleaved get benchmarks test local_storage
+ * get performance; specifically, they demonstrate the performance cliff
+ * of the current list-plus-cache local_storage model.
+ *
+ * cache sequential get: call bpf_task_storage_get on n maps in order
+ * cache interleaved get: like "sequential get", but interleave 4 calls to
+ * the 'important' map (idx 0 in array_of_maps) for every 10 calls. The
+ * goal is to mimic an environment where many progs access their
+ * local_storage maps, with 'our' prog needing to access its map more
+ * often than others.
+ */
+const struct bench bench_local_storage_cache_seq_get = {
+ .name = "local-storage-cache-seq-get",
+ .validate = validate,
+ .setup = local_storage_cache_get_setup,
+ .producer_thread = producer,
+ .consumer_thread = consumer,
+ .measure = measure,
+ .report_progress = local_storage_report_progress,
+ .report_final = local_storage_report_final,
+};
+
+const struct bench bench_local_storage_cache_interleaved_get = {
+ .name = "local-storage-cache-int-get",
+ .validate = validate,
+ .setup = local_storage_cache_get_interleaved_setup,
+ .producer_thread = producer,
+ .consumer_thread = consumer,
+ .measure = measure,
+ .report_progress = local_storage_report_progress,
+ .report_final = local_storage_report_final,
+};
+
+const struct bench bench_local_storage_cache_hashmap_control = {
+ .name = "local-storage-cache-hashmap-control",
+ .validate = validate,
+ .setup = hashmap_setup,
+ .producer_thread = producer,
+ .consumer_thread = consumer,
+ .measure = measure,
+ .report_progress = local_storage_report_progress,
+ .report_final = local_storage_report_final,
+};
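
Stripped of the BTF plumbing, __setup() follows the standard libbpf map-in-map population pattern: create an inner map with bpf_map_create(), then store its fd in a slot of the outer array-of-maps. A minimal sketch, assuming outer_fd is a loaded BPF_MAP_TYPE_ARRAY_OF_MAPS fd; a task_storage inner map, as used above, would additionally need btf_fd and btf key/value type ids in the opts:

#include <unistd.h>
#include <bpf/bpf.h>

/* sketch: create one hash inner map and install it in slot 0 of an
 * existing array-of-maps; outer_fd is assumed to come from a loaded
 * skeleton, as in __setup() above
 */
static int add_inner_map(int outer_fd)
{
	int key = 0, fd, err;

	fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
			    sizeof(int), sizeof(int), 1024, NULL);
	if (fd < 0)
		return fd;

	err = bpf_map_update_elem(outer_fd, &key, &fd, 0);
	if (err)
		close(fd);
	return err;
}
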
diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
new file mode 100644
index 000000000000..43f109d93130
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <argp.h>
+
+#include <sys/prctl.h>
+#include "local_storage_rcu_tasks_trace_bench.skel.h"
+#include "bench.h"
+
+#include <signal.h>
+
+static struct {
+ __u32 nr_procs;
+ __u32 kthread_pid;
+ bool quiet;
+} args = {
+ .nr_procs = 1000,
+ .kthread_pid = 0,
+ .quiet = false,
+};
+
+enum {
+ ARG_NR_PROCS = 7000,
+ ARG_KTHREAD_PID = 7001,
+ ARG_QUIET = 7002,
+};
+
+static const struct argp_option opts[] = {
+ { "nr_procs", ARG_NR_PROCS, "NR_PROCS", 0,
+ "Set number of user processes to spin up"},
+ { "kthread_pid", ARG_KTHREAD_PID, "PID", 0,
+ "Pid of rcu_tasks_trace kthread for ticks tracking"},
+ { "quiet", ARG_QUIET, "{0,1}", 0,
+ "If true, don't report progress"},
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ long ret;
+
+ switch (key) {
+ case ARG_NR_PROCS:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1 || ret > UINT_MAX) {
+ fprintf(stderr, "invalid nr_procs\n");
+ argp_usage(state);
+ }
+ args.nr_procs = ret;
+ break;
+ case ARG_KTHREAD_PID:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 1) {
+ fprintf(stderr, "invalid kthread_pid\n");
+ argp_usage(state);
+ }
+ args.kthread_pid = ret;
+ break;
+ case ARG_QUIET:
+ ret = strtol(arg, NULL, 10);
+ if (ret < 0 || ret > 1) {
+ fprintf(stderr, "invalid quiet %ld\n", ret);
+ argp_usage(state);
+ }
+ args.quiet = ret;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+const struct argp bench_local_storage_rcu_tasks_trace_argp = {
+ .options = opts,
+ .parser = parse_arg,
+};
+
+#define MAX_SLEEP_PROCS 150000
+
+static void validate(void)
+{
+ if (env.producer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-producer!\n");
+ exit(1);
+ }
+ if (env.consumer_cnt != 1) {
+ fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
+ exit(1);
+ }
+
+ if (args.nr_procs > MAX_SLEEP_PROCS) {
+ fprintf(stderr, "benchmark supports up to %u sleeper procs!\n",
+ MAX_SLEEP_PROCS);
+ exit(1);
+ }
+}
+
+static long kthread_pid_ticks(void)
+{
+ char procfs_path[100];
+ long stime;
+ FILE *f;
+
+ if (!args.kthread_pid)
+ return -1;
+
+ sprintf(procfs_path, "/proc/%u/stat", args.kthread_pid);
+ f = fopen(procfs_path, "r");
+ if (!f) {
+ fprintf(stderr, "couldn't open %s, exiting\n", procfs_path);
+ goto err_out;
+ }
+ if (fscanf(f, "%*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s %ld", &stime) != 1) {
+ fprintf(stderr, "fscanf of %s failed, exiting\n", procfs_path);
+ goto err_out;
+ }
+ fclose(f);
+ return stime;
+
+err_out:
+ if (f)
+ fclose(f);
+ exit(1);
+ return 0;
+}
+
+static struct {
+ struct local_storage_rcu_tasks_trace_bench *skel;
+ long prev_kthread_stime;
+} ctx;
+
+static void sleep_and_loop(void)
+{
+ while (true) {
+ sleep(rand() % 4);
+ syscall(__NR_getpgid);
+ }
+}
+
+static void local_storage_tasks_trace_setup(void)
+{
+ int i, err, forkret, runner_pid;
+
+ runner_pid = getpid();
+
+ for (i = 0; i < args.nr_procs; i++) {
+ forkret = fork();
+ if (forkret < 0) {
+ fprintf(stderr, "Error forking sleeper proc %u of %u, exiting\n", i,
+ args.nr_procs);
+ goto err_out;
+ }
+
+ if (!forkret) {
+ err = prctl(PR_SET_PDEATHSIG, SIGKILL);
+ if (err < 0) {
+ fprintf(stderr, "prctl failed with err %d, exiting\n", errno);
+ goto err_out;
+ }
+
+ if (getppid() != runner_pid) {
+ fprintf(stderr, "Runner died while spinning up procs, exiting\n");
+ goto err_out;
+ }
+ sleep_and_loop();
+ }
+ }
+ printf("Spun up %u procs (our pid %d)\n", args.nr_procs, runner_pid);
+
+ setup_libbpf();
+
+ ctx.skel = local_storage_rcu_tasks_trace_bench__open_and_load();
+ if (!ctx.skel) {
+ fprintf(stderr, "Error doing open_and_load, exiting\n");
+ goto err_out;
+ }
+
+ ctx.prev_kthread_stime = kthread_pid_ticks();
+
+ if (!bpf_program__attach(ctx.skel->progs.get_local)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ if (!bpf_program__attach(ctx.skel->progs.pregp_step)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ if (!bpf_program__attach(ctx.skel->progs.postgp)) {
+ fprintf(stderr, "Error attaching bpf program\n");
+ goto err_out;
+ }
+
+ return;
+err_out:
+ exit(1);
+}
+
+static void measure(struct bench_res *res)
+{
+ long ticks;
+
+ res->gp_ct = atomic_swap(&ctx.skel->bss->gp_hits, 0);
+ res->gp_ns = atomic_swap(&ctx.skel->bss->gp_times, 0);
+ ticks = kthread_pid_ticks();
+ res->stime = ticks - ctx.prev_kthread_stime;
+ ctx.prev_kthread_stime = ticks;
+}
+
+static void *consumer(void *input)
+{
+ return NULL;
+}
+
+static void *producer(void *input)
+{
+ while (true)
+ syscall(__NR_getpgid);
+ return NULL;
+}
+
+static void report_progress(int iter, struct bench_res *res, long delta_ns)
+{
+ if (ctx.skel->bss->unexpected) {
+ fprintf(stderr, "Error: Unexpected order of bpf prog calls (postgp after pregp).");
+ fprintf(stderr, "Data can't be trusted, exiting\n");
+ exit(1);
+ }
+
+ if (args.quiet)
+ return;
+
+ printf("Iter %d\t avg tasks_trace grace period latency\t%lf ns\n",
+ iter, res->gp_ns / (double)res->gp_ct);
+ printf("Iter %d\t avg ticks per tasks_trace grace period\t%lf\n",
+ iter, res->stime / (double)res->gp_ct);
+}
+
+static void report_final(struct bench_res res[], int res_cnt)
+{
+ struct basic_stats gp_stat;
+
+ grace_period_latency_basic_stats(res, res_cnt, &gp_stat);
+ printf("SUMMARY tasks_trace grace period latency");
+ printf("\tavg %.3lf us\tstddev %.3lf us\n", gp_stat.mean, gp_stat.stddev);
+ grace_period_ticks_basic_stats(res, res_cnt, &gp_stat);
+ printf("SUMMARY ticks per tasks_trace grace period");
+ printf("\tavg %.3lf\tstddev %.3lf\n", gp_stat.mean, gp_stat.stddev);
+}
+
+/* local-storage-tasks-trace: Benchmark performance of BPF local_storage's use
+ * of RCU Tasks-Trace.
+ *
+ * Stress RCU Tasks Trace by forking many tasks, all of which do no work
+ * aside from a sleep() loop, and creating/destroying BPF task-local storage
+ * on wakeup. The number of forked tasks is configurable.
+ *
+ * Exercising code paths which call call_rcu_tasks_trace while there are many
+ * thousands of tasks on the system should result in RCU Tasks-Trace having to
+ * do a noticeable amount of work.
+ *
+ * This should be observable by measuring rcu_tasks_trace_kthread CPU usage
+ * after the grace period has ended, or by measuring grace period latency.
+ *
+ * This benchmark uses both approaches, attaching to rcu_tasks_trace_pregp_step
+ * and rcu_tasks_trace_postgp functions to measure grace period latency, and
+ * using /proc/PID/stat to measure rcu_tasks_trace_kthread kernel ticks.
+ */
+const struct bench bench_local_storage_tasks_trace = {
+ .name = "local-storage-tasks-trace",
+ .validate = validate,
+ .setup = local_storage_tasks_trace_setup,
+ .producer_thread = producer,
+ .consumer_thread = consumer,
+ .measure = measure,
+ .report_progress = report_progress,
+ .report_final = report_final,
+};
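
kthread_pid_ticks() above counts 14 whitespace-delimited fields before stime with fscanf, which is safe for a kthread whose comm contains no spaces. An alternative sketch (not what the benchmark does) that stays correct even when the comm field contains spaces, by seeking past the closing parenthesis first:

#include <stdio.h>
#include <string.h>

/* sketch: read stime (field 15 of /proc/PID/stat) by skipping past the
 * last ')' so a comm containing spaces cannot shift the field positions
 */
static long read_stime(const char *stat_path)
{
	unsigned long utime, stime;
	char line[512], *p;
	FILE *f = fopen(stat_path, "r");

	if (!f)
		return -1;
	p = fgets(line, sizeof(line), f);
	fclose(f);
	if (!p || !(p = strrchr(line, ')')))
		return -1;

	/* after ')': state, then 10 numeric fields, then utime, stime */
	if (sscanf(p + 2, "%*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %lu %lu",
		   &utime, &stime) != 2)
		return -1;
	return (long)stime;
}
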
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
new file mode 100755
index 000000000000..1e2de838f9fa
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+nr_threads=$(($(grep -c ^processor /proc/cpuinfo) - 1))
+summary=$($RUN_BENCH -p $nr_threads bpf-hashmap-full-update)
+printf "$summary"
+printf "\n"
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_local_storage.sh b/tools/testing/selftests/bpf/benchs/run_bench_local_storage.sh
new file mode 100755
index 000000000000..2eb2b513a173
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_local_storage.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source ./benchs/run_common.sh
+
+set -eufo pipefail
+
+header "Hashmap Control"
+for i in 10 1000 10000 100000 4194304; do
+subtitle "num keys: $i"
+ summarize_local_storage "hashmap (control) sequential get: "\
+ "$(./bench --nr_maps 1 --hashmap_nr_keys_used=$i local-storage-cache-hashmap-control)"
+ printf "\n"
+done
+
+header "Local Storage"
+for i in 1 10 16 17 24 32 100 1000; do
+subtitle "num_maps: $i"
+ summarize_local_storage "local_storage cache sequential get: "\
+ "$(./bench --nr_maps $i local-storage-cache-seq-get)"
+ summarize_local_storage "local_storage cache interleaved get: "\
+ "$(./bench --nr_maps $i local-storage-cache-int-get)"
+ printf "\n"
+done
diff --git a/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh
new file mode 100755
index 000000000000..5dac1f02892c
--- /dev/null
+++ b/tools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+kthread_pid=$(pgrep rcu_tasks_trace_kthread)
+
+if [ -z "$kthread_pid" ]; then
+ echo "error: Couldn't find rcu_tasks_trace_kthread"
+ exit 1
+fi
+
+./bench --nr_procs 15000 --kthread_pid $kthread_pid -d 600 --quiet 1 local-storage-tasks-trace
diff --git a/tools/testing/selftests/bpf/benchs/run_common.sh b/tools/testing/selftests/bpf/benchs/run_common.sh
index 6c5e6023a69f..d9f40af82006 100644
--- a/tools/testing/selftests/bpf/benchs/run_common.sh
+++ b/tools/testing/selftests/bpf/benchs/run_common.sh
@@ -41,6 +41,16 @@ function ops()
echo "$*" | sed -E "s/.*latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/"
}
+function local_storage()
+{
+ echo -n "hits throughput: "
+ echo -n "$*" | sed -E "s/.* hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
+ echo -n -e ", hits latency: "
+ echo -n "$*" | sed -E "s/.* hits latency\s+([0-9]+\.[0-9]+\sns\/op).*/\1/"
+ echo -n ", important_hits throughput: "
+ echo "$*" | sed -E "s/.*important_hits throughput\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+\sM\sops\/s).*/\1/"
+}
+
function total()
{
echo "$*" | sed -E "s/.*total operations\s+([0-9]+\.[0-9]+ ± [0-9]+\.[0-9]+M\/s).*/\1/"
@@ -67,6 +77,13 @@ function summarize_ops()
printf "%-20s %s\n" "$bench" "$(ops $summary)"
}
+function summarize_local_storage()
+{
+ bench="$1"
+ summary=$(echo $2 | tail -n1)
+ printf "%-20s %s\n" "$bench" "$(local_storage $summary)"
+}
+
function summarize_total()
{
bench="$1"
diff --git a/tools/testing/selftests/bpf/bpf_legacy.h b/tools/testing/selftests/bpf/bpf_legacy.h
index 719ab56cdb5d..845209581440 100644
--- a/tools/testing/selftests/bpf/bpf_legacy.h
+++ b/tools/testing/selftests/bpf/bpf_legacy.h
@@ -2,15 +2,6 @@
#ifndef __BPF_LEGACY__
#define __BPF_LEGACY__
-#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
- struct ____btf_map_##name { \
- type_key key; \
- type_val value; \
- }; \
- struct ____btf_map_##name \
- __attribute__ ((section(".maps." #name), used)) \
- ____btf_map_##name = { }
-
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
*/
diff --git a/tools/testing/selftests/bpf/bpf_rlimit.h b/tools/testing/selftests/bpf/bpf_rlimit.h
deleted file mode 100644
index 9dac9b30f8ef..000000000000
--- a/tools/testing/selftests/bpf/bpf_rlimit.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <sys/resource.h>
-#include <stdio.h>
-
-static __attribute__((constructor)) void bpf_rlimit_ctor(void)
-{
- struct rlimit rlim_old, rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- getrlimit(RLIMIT_MEMLOCK, &rlim_old);
- /* For the sake of running the test cases, we temporarily
- * set rlimit to infinity in order for kernel to focus on
- * errors from actual test cases and not getting noise
- * from hitting memlock limits. The limit is on per-process
- * basis and not a global one, hence destructor not really
- * needed here.
- */
- if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) {
- perror("Unable to lift memlock rlimit");
- /* Trying out lower limit, but expect potential test
- * case failures from this!
- */
- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
- }
-}
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index b1ede6f0b821..82a7c9de95f9 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -16,6 +16,10 @@ BPF_PROG(name, args)
#define SOL_TCP 6
#endif
+#ifndef TCP_CA_NAME_MAX
+#define TCP_CA_NAME_MAX 16
+#endif
+
#define tcp_jiffies32 ((__u32)bpf_jiffies64())
struct sock_common {
@@ -81,6 +85,7 @@ struct tcp_sock {
__u32 lsndtime;
__u32 prior_cwnd;
__u64 tcp_mstamp; /* most recent packet received/sent */
+ bool is_mptcp;
} __attribute__((preserve_access_index));
static __always_inline struct inet_connection_sock *inet_csk(const struct sock *sk)
@@ -225,4 +230,12 @@ static __always_inline bool tcp_cc_eq(const char *a, const char *b)
extern __u32 tcp_slow_start(struct tcp_sock *tp, __u32 acked) __ksym;
extern void tcp_cong_avoid_ai(struct tcp_sock *tp, __u32 w, __u32 acked) __ksym;
+struct mptcp_sock {
+ struct inet_connection_sock sk;
+
+ __u32 token;
+ struct sock *first;
+ char ca_name[TCP_CA_NAME_MAX];
+} __attribute__((preserve_access_index));
+
#endif
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index e585e1cefc77..792cb15bac40 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -148,13 +148,13 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.write = bpf_testmod_test_write,
};
-BTF_SET_START(bpf_testmod_check_kfunc_ids)
-BTF_ID(func, bpf_testmod_test_mod_kfunc)
-BTF_SET_END(bpf_testmod_check_kfunc_ids)
+BTF_SET8_START(bpf_testmod_check_kfunc_ids)
+BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
+BTF_SET8_END(bpf_testmod_check_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
- .owner = THIS_MODULE,
- .check_set = &bpf_testmod_check_kfunc_ids,
+ .owner = THIS_MODULE,
+ .set = &bpf_testmod_check_kfunc_ids,
};
extern int bpf_fentry_test1(int a);
diff --git a/tools/testing/selftests/bpf/btf_helpers.c b/tools/testing/selftests/bpf/btf_helpers.c
index b5941d514e17..1c1c2c26690a 100644
--- a/tools/testing/selftests/bpf/btf_helpers.c
+++ b/tools/testing/selftests/bpf/btf_helpers.c
@@ -26,11 +26,12 @@ static const char * const btf_kind_str_mapping[] = {
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
+ [BTF_KIND_ENUM64] = "ENUM64",
};
static const char *btf_kind_str(__u16 kind)
{
- if (kind > BTF_KIND_TYPE_TAG)
+ if (kind > BTF_KIND_ENUM64)
return "UNKNOWN";
return btf_kind_str_mapping[kind];
}
@@ -139,14 +140,32 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
}
case BTF_KIND_ENUM: {
const struct btf_enum *v = btf_enum(t);
+ const char *fmt_str;
- fprintf(out, " size=%u vlen=%u", t->size, vlen);
+ fmt_str = btf_kflag(t) ? "\n\t'%s' val=%d" : "\n\t'%s' val=%u";
+ fprintf(out, " encoding=%s size=%u vlen=%u",
+ btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
for (i = 0; i < vlen; i++, v++) {
- fprintf(out, "\n\t'%s' val=%u",
+ fprintf(out, fmt_str,
btf_str(btf, v->name_off), v->val);
}
break;
}
+ case BTF_KIND_ENUM64: {
+ const struct btf_enum64 *v = btf_enum64(t);
+ const char *fmt_str;
+
+ fmt_str = btf_kflag(t) ? "\n\t'%s' val=%lld" : "\n\t'%s' val=%llu";
+
+ fprintf(out, " encoding=%s size=%u vlen=%u",
+ btf_kflag(t) ? "SIGNED" : "UNSIGNED", t->size, vlen);
+ for (i = 0; i < vlen; i++, v++) {
+ fprintf(out, fmt_str,
+ btf_str(btf, v->name_off),
+ ((__u64)v->val_hi32 << 32) | v->val_lo32);
+ }
+ break;
+ }
case BTF_KIND_FWD:
fprintf(out, " fwd_kind=%s", btf_kflag(t) ? "union" : "struct");
break;
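
The new ENUM64 case reassembles each 64-bit enumerator value from BTF's split encoding (val_lo32/val_hi32). The shift-and-or arithmetic in isolation:

#include <stdio.h>

int main(void)
{
	unsigned long long val = 0x1234567890abcdefULL;	/* hypothetical */
	unsigned int lo32 = (unsigned int)val;		/* btf_enum64.val_lo32 */
	unsigned int hi32 = (unsigned int)(val >> 32);	/* btf_enum64.val_hi32 */

	/* join, exactly as the BTF_KIND_ENUM64 case above does */
	unsigned long long joined = ((unsigned long long)hi32 << 32) | lo32;

	printf("%llx == %llx\n", val, joined);
	return 0;
}
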
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 763db63a3890..fabf0c014349 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -1,55 +1,64 @@
+CONFIG_BLK_DEV_LOOP=y
CONFIG_BPF=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_NET_CLS_BPF=m
CONFIG_BPF_EVENTS=y
-CONFIG_TEST_BPF=m
+CONFIG_BPF_JIT=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_BPF_LSM=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
-CONFIG_NETDEVSIM=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_INGRESS=y
-CONFIG_NET_IPIP=y
-CONFIG_IPV6=y
-CONFIG_NET_IPGRE_DEMUX=y
-CONFIG_NET_IPGRE=y
-CONFIG_IPV6_GRE=y
-CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_HMAC=m
CONFIG_CRYPTO_SHA256=m
-CONFIG_VXLAN=y
-CONFIG_GENEVE=y
-CONFIG_NET_CLS_FLOWER=m
-CONFIG_LWTUNNEL=y
-CONFIG_BPF_STREAM_PARSER=y
-CONFIG_XDP_SOCKETS=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FPROBE=y
CONFIG_FTRACE_SYSCALLS=y
-CONFIG_IPV6_TUNNEL=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_GENEVE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IMA=y
+CONFIG_IMA_READ_POLICY=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_TARGET_SYNPROXY=y
+CONFIG_IPV6=y
+CONFIG_IPV6_FOU=m
+CONFIG_IPV6_FOU_TUNNEL=m
CONFIG_IPV6_GRE=y
CONFIG_IPV6_SEG6_BPF=y
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_TUNNEL=y
+CONFIG_LIRC=y
+CONFIG_LWTUNNEL=y
+CONFIG_MPLS=y
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPTCP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_FOU=m
CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_IPV6_FOU=m
-CONFIG_IPV6_FOU_TUNNEL=m
-CONFIG_MPLS=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPIP=y
CONFIG_NET_MPLS_GSO=m
-CONFIG_MPLS_ROUTING=m
-CONFIG_MPLS_IPTUNNEL=m
-CONFIG_IPV6_SIT=m
-CONFIG_BPF_JIT=y
-CONFIG_BPF_LSM=y
-CONFIG_SECURITY=y
-CONFIG_RC_CORE=y
-CONFIG_LIRC=y
-CONFIG_BPF_LIRC_MODE2=y
-CONFIG_IMA=y
-CONFIG_SECURITYFS=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_READ_POLICY=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_DYNAMIC_FTRACE=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_SCHED=y
+CONFIG_NETDEVSIM=m
CONFIG_NETFILTER=y
+CONFIG_NETFILTER_SYNPROXY=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NF_CONNTRACK=y
CONFIG_NF_DEFRAG_IPV4=y
CONFIG_NF_DEFRAG_IPV6=y
-CONFIG_NF_CONNTRACK=y
+CONFIG_RC_CORE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_TEST_BPF=m
CONFIG_USERFAULTFD=y
+CONFIG_VXLAN=y
+CONFIG_XDP_SOCKETS=y
diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x
new file mode 100644
index 000000000000..f8a7a258a718
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.s390x
@@ -0,0 +1,147 @@
+CONFIG_9P_FS=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+CONFIG_AUDIT=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BONDING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPFILTER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_CPUSETS=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CRYPTO_USER_API_RNG=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_SG=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FANOTIFY=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_MARCH_Z10_FEATURES=y
+CONFIG_HAVE_MARCH_Z196_FEATURES=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HUGETLBFS=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ_100=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_IKHEADERS=y
+CONFIG_INET6_ESP=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPVLAN=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KERNEL_UNCOMPRESSED=y
+CONFIG_KPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_KRETPROBES=y
+CONFIG_KSM=y
+CONFIG_LATENCYTOP=y
+CONFIG_LIVEPATCH=y
+CONFIG_LOCK_STAT=y
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MARCH_Z196=y
+CONFIG_MARCH_Z196_TUNE=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_KEY=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NF_TABLES=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SAMPLE_SECCOMP=y
+CONFIG_SAMPLES=y
+CONFIG_SCHED_TRACER=y
+CONFIG_SCSI=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_STACK_TRACER=y
+CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_DCTCP=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UPROBES=y
+CONFIG_USELIB=y
+CONFIG_USER_NS=y
+CONFIG_VETH=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VLAN_8021Q=y
+CONFIG_XFRM_USER=y
diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64
new file mode 100644
index 000000000000..f0859a1d37ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/config.x86_64
@@ -0,0 +1,251 @@
+CONFIG_9P_FS=y
+CONFIG_9P_FS_POSIX_ACL=y
+CONFIG_9P_FS_SECURITY=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_AGP_SIS=y
+CONFIG_AGP_VIA=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_AUDIT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_BLK_CGROUP=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BONDING=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_BOOTTIME_TRACING=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
+CONFIG_BPF_PRELOAD=y
+CONFIG_BPF_PRELOAD_UMD=y
+CONFIG_BPFILTER=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=7
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPUSETS=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_XXHASH=y
+CONFIG_DCB=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEFAULT_FQ_CODEL=y
+CONFIG_DEFAULT_RENO=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_EXPERT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FAIL_FUNCTION=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_VESA=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_FONTS=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_GART_IOMMU=y
+CONFIG_GENERIC_PHY=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_KYE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HPET=y
+CONFIG_HUGETLBFS=y
+CONFIG_HWPOISON_INJECT=y
+CONFIG_HZ_1000=y
+CONFIG_INET=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INTEL_POWERCLAMP=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IRQ_POLL=y
+CONFIG_JUMP_LABEL=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_KEXEC=y
+CONFIG_KPROBES=y
+CONFIG_KSM=y
+CONFIG_LEGACY_VSYSCALL_NONE=y
+CONFIG_LOG_BUF_SHIFT=21
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=0
+CONFIG_LOGO=y
+CONFIG_LSM="selinux,bpf,integrity"
+CONFIG_MAC_PARTITION=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MCORE2=y
+CONFIG_MEMCG=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_NAMESPACES=y
+CONFIG_NET=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_NET_L3_MASTER_DEV=y
+CONFIG_NET_SCH_DEFAULT=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_VRF=y
+CONFIG_NETDEVICES=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETLABEL=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NO_HZ=y
+CONFIG_NR_CPUS=128
+CONFIG_NUMA=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_NVMEM=y
+CONFIG_OSF_PARTITION=y
+CONFIG_PACKET=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PCI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PHYSICAL_ALIGN=0x1000000
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_PREEMPT=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROFILING=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_RC_DEVICES=y
+CONFIG_RC_LOOPBACK=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SMP=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_SYNC_FILE=y
+CONFIG_SYSVIPC=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_MD5SIG=y
+CONFIG_TLS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_TUN=y
+CONFIG_UNIX=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_USER_NS=y
+CONFIG_VALIDATE_FS_PARSER=y
+CONFIG_VETH=y
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VLAN_8021Q=y
+CONFIG_X86_ACPI_CPUFREQ=y
+CONFIG_X86_CPUID=y
+CONFIG_X86_MSR=y
+CONFIG_X86_POWERNOW_K8=y
+CONFIG_XDP_SOCKETS_DIAG=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_USER=y
+CONFIG_ZEROPLUS_FF=y
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index 87fd1aa323a9..c8be6406777f 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -11,7 +11,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
@@ -25,9 +24,8 @@ static void load_and_attach_program(void)
int prog_fd, ret;
struct bpf_object *obj;
- ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
- if (ret)
- error(1, 0, "failed to enable libbpf strict mode: %d", ret);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
ret = bpf_flow_load(&obj, cfg_path_name, cfg_prog_name,
cfg_map_name, NULL, &prog_fd, NULL);
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c
index 3a7b82bd9e94..e021cc67dc02 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c
@@ -20,7 +20,6 @@
#include "cgroup_helpers.h"
#include "testing_helpers.h"
-#include "bpf_rlimit.h"
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
@@ -67,6 +66,9 @@ int main(int argc, char **argv)
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
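
Both user programs above now enable libbpf 1.0 API mode unconditionally, without checking the return value. A minimal sketch of the pattern; note that LIBBPF_STRICT_ALL also covers LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK, so libbpf raises RLIMIT_MEMLOCK itself where needed, which is what makes the bpf_rlimit.h constructor removed earlier unnecessary:

#include <bpf/libbpf.h>

int main(void)
{
	/* opt in to libbpf 1.0 behavior before any other libbpf call */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	/* ... open/load skeletons, create maps, attach programs ... */
	return 0;
}
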
diff --git a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
new file mode 100644
index 000000000000..f472d28ad11a
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
+#define OUTER_MAP_ENTRIES 10
+
+static __u32 get_map_id_from_fd(int map_fd)
+{
+ struct bpf_map_info map_info = {};
+ uint32_t info_len = sizeof(map_info);
+ int ret;
+
+ ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ CHECK(ret < 0, "Finding map info failed", "error:%s\n",
+ strerror(errno));
+
+ return map_info.id;
+}
+
+/* This creates OUTER_MAP_ENTRIES inner maps that will be stored in the
+ * outer map, and returns the created map fds via inner_map_fds.
+ */
+static void create_inner_maps(enum bpf_map_type map_type,
+ __u32 *inner_map_fds)
+{
+ int map_fd, map_index, ret;
+ __u32 map_key = 0, map_id;
+ char map_name[15];
+
+ for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++) {
+ memset(map_name, 0, sizeof(map_name));
+ sprintf(map_name, "inner_map_fd_%d", map_index);
+ map_fd = bpf_map_create(map_type, map_name, sizeof(__u32),
+ sizeof(__u32), 1, NULL);
+ CHECK(map_fd < 0,
+ "inner bpf_map_create() failed",
+ "map_type=(%d) map_name(%s), error:%s\n",
+ map_type, map_name, strerror(errno));
+
+		/* keep track of the inner map fd as it is required
+		 * to add records in the outer map
+		 */
+ inner_map_fds[map_index] = map_fd;
+
+ /* Add entry into this created map
+ * eg: map1 key = 0, value = map1's map id
+ * map2 key = 0, value = map2's map id
+ */
+ map_id = get_map_id_from_fd(map_fd);
+ ret = bpf_map_update_elem(map_fd, &map_key, &map_id, 0);
+ CHECK(ret != 0,
+ "bpf_map_update_elem failed",
+ "map_type=(%d) map_name(%s), error:%s\n",
+ map_type, map_name, strerror(errno));
+ }
+}
+
+static int create_outer_map(enum bpf_map_type map_type, __u32 inner_map_fd)
+{
+ int outer_map_fd;
+ LIBBPF_OPTS(bpf_map_create_opts, attr);
+
+ attr.inner_map_fd = inner_map_fd;
+ outer_map_fd = bpf_map_create(map_type, "outer_map", sizeof(__u32),
+ sizeof(__u32), OUTER_MAP_ENTRIES,
+ &attr);
+ CHECK(outer_map_fd < 0,
+ "outer bpf_map_create()",
+ "map_type=(%d), error:%s\n",
+ map_type, strerror(errno));
+
+ return outer_map_fd;
+}
+
+static void validate_fetch_results(int outer_map_fd,
+ __u32 *fetched_keys, __u32 *fetched_values,
+ __u32 max_entries_fetched)
+{
+ __u32 inner_map_key, inner_map_value;
+ int inner_map_fd, entry, err;
+ __u32 outer_map_value;
+
+ for (entry = 0; entry < max_entries_fetched; ++entry) {
+ outer_map_value = fetched_values[entry];
+ inner_map_fd = bpf_map_get_fd_by_id(outer_map_value);
+ CHECK(inner_map_fd < 0,
+ "Failed to get inner map fd",
+ "from id(%d), error=%s\n",
+ outer_map_value, strerror(errno));
+ err = bpf_map_get_next_key(inner_map_fd, NULL, &inner_map_key);
+ CHECK(err != 0,
+ "Failed to get inner map key",
+ "error=%s\n", strerror(errno));
+
+ err = bpf_map_lookup_elem(inner_map_fd, &inner_map_key,
+ &inner_map_value);
+
+ close(inner_map_fd);
+
+ CHECK(err != 0,
+ "Failed to get inner map value",
+ "for key(%d), error=%s\n",
+ inner_map_key, strerror(errno));
+
+ /* Actual value validation */
+ CHECK(outer_map_value != inner_map_value,
+ "Failed to validate inner map value",
+ "fetched(%d) and lookedup(%d)!\n",
+ outer_map_value, inner_map_value);
+ }
+}
+
+static void fetch_and_validate(int outer_map_fd,
+ struct bpf_map_batch_opts *opts,
+ __u32 batch_size, bool delete_entries)
+{
+ __u32 *fetched_keys, *fetched_values, total_fetched = 0;
+ __u32 batch_key = 0, fetch_count, step_size;
+ int err, max_entries = OUTER_MAP_ENTRIES;
+ __u32 value_size = sizeof(__u32);
+
+	/* All entries need to be fetched */
+ fetched_keys = calloc(max_entries, value_size);
+ fetched_values = calloc(max_entries, value_size);
+ CHECK((!fetched_keys || !fetched_values),
+ "Memory allocation failed for fetched_keys or fetched_values",
+ "error=%s\n", strerror(errno));
+
+ for (step_size = batch_size;
+ step_size <= max_entries;
+ step_size += batch_size) {
+ fetch_count = step_size;
+ err = delete_entries
+ ? bpf_map_lookup_and_delete_batch(outer_map_fd,
+ total_fetched ? &batch_key : NULL,
+ &batch_key,
+ fetched_keys + total_fetched,
+ fetched_values + total_fetched,
+ &fetch_count, opts)
+ : bpf_map_lookup_batch(outer_map_fd,
+ total_fetched ? &batch_key : NULL,
+ &batch_key,
+ fetched_keys + total_fetched,
+ fetched_values + total_fetched,
+ &fetch_count, opts);
+
+ if (err && errno == ENOSPC) {
+ /* Fetch again with higher batch size */
+ total_fetched = 0;
+ continue;
+ }
+
+ CHECK((err < 0 && (errno != ENOENT)),
+ "lookup with steps failed",
+ "error: %s\n", strerror(errno));
+
+ /* Update the total fetched number */
+ total_fetched += fetch_count;
+ if (err)
+ break;
+ }
+
+ CHECK((total_fetched != max_entries),
+ "Unable to fetch expected entries !",
+ "total_fetched(%d) and max_entries(%d) error: (%d):%s\n",
+ total_fetched, max_entries, errno, strerror(errno));
+
+ /* validate the fetched entries */
+ validate_fetch_results(outer_map_fd, fetched_keys,
+ fetched_values, total_fetched);
+ printf("batch_op(%s) is successful with batch_size(%d)\n",
+ delete_entries ? "LOOKUP_AND_DELETE" : "LOOKUP", batch_size);
+
+ free(fetched_keys);
+ free(fetched_values);
+}
+
+static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type,
+ enum bpf_map_type inner_map_type)
+{
+ __u32 *outer_map_keys, *inner_map_fds;
+ __u32 max_entries = OUTER_MAP_ENTRIES;
+ LIBBPF_OPTS(bpf_map_batch_opts, opts);
+ __u32 value_size = sizeof(__u32);
+ int batch_size[2] = {5, 10};
+ __u32 map_index, op_index;
+ int outer_map_fd, ret;
+
+ outer_map_keys = calloc(max_entries, value_size);
+ inner_map_fds = calloc(max_entries, value_size);
+ CHECK((!outer_map_keys || !inner_map_fds),
+ "Memory allocation failed for outer_map_keys or inner_map_fds",
+ "error=%s\n", strerror(errno));
+
+ create_inner_maps(inner_map_type, inner_map_fds);
+
+ outer_map_fd = create_outer_map(outer_map_type, *inner_map_fds);
+ /* create outer map keys */
+ for (map_index = 0; map_index < max_entries; map_index++)
+ outer_map_keys[map_index] =
+ ((outer_map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
+ ? 9 : 1000) - map_index;
+
+ /* batch operation - map_update */
+ ret = bpf_map_update_batch(outer_map_fd, outer_map_keys,
+ inner_map_fds, &max_entries, &opts);
+ CHECK(ret != 0,
+ "Failed to update the outer map batch ops",
+ "error=%s\n", strerror(errno));
+
+ /* batch operation - map_lookup */
+ for (op_index = 0; op_index < 2; ++op_index)
+ fetch_and_validate(outer_map_fd, &opts,
+ batch_size[op_index], false);
+
+ /* batch operation - map_lookup_delete */
+ if (outer_map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
+ fetch_and_validate(outer_map_fd, &opts,
+ max_entries, true /*delete*/);
+
+ /* close all map fds */
+ for (map_index = 0; map_index < max_entries; map_index++)
+ close(inner_map_fds[map_index]);
+ close(outer_map_fd);
+
+ free(inner_map_fds);
+ free(outer_map_keys);
+}
+
+void test_map_in_map_batch_ops_array(void)
+{
+ _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY);
+ printf("%s:PASS with inner ARRAY map\n", __func__);
+ _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH);
+ printf("%s:PASS with inner HASH map\n", __func__);
+}
+
+void test_map_in_map_batch_ops_hash(void)
+{
+ _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY);
+ printf("%s:PASS with inner ARRAY map\n", __func__);
+ _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH);
+ printf("%s:PASS with inner HASH map\n", __func__);
+}
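
fetch_and_validate() above exercises the batch API's cursor convention: in_batch is NULL on the first call, each call returns a resumable cursor through out_batch, and a negative return with errno ENOENT marks the final batch. A condensed sketch of just that loop, assuming map_fd is a hash map with __u32 keys and values and caller-provided buffers of capacity max:

#include <errno.h>
#include <bpf/bpf.h>

/* sketch: drain a map with bpf_map_lookup_batch(); the first call passes
 * in_batch = NULL, later calls resume from the returned cursor, and
 * ENOENT signals that the last batch has been fetched
 */
static int drain_map(int map_fd, __u32 *keys, __u32 *vals, __u32 max)
{
	__u32 cursor, count, total = 0;
	int err = 0;

	while (total < max) {
		count = max - total;
		err = bpf_map_lookup_batch(map_fd,
					   total ? &cursor : NULL, &cursor,
					   keys + total, vals + total,
					   &count, NULL);
		if (err && errno != ENOENT)
			return -errno;
		total += count;
		if (err)
			break;	/* ENOENT: no more entries */
	}
	return total;
}
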
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 2bb1f9b3841d..bec15558fd93 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -21,6 +21,10 @@
#include "network_helpers.h"
#include "test_progs.h"
+#ifndef IPPROTO_MPTCP
+#define IPPROTO_MPTCP 262
+#endif
+
#define clean_errno() (errno == 0 ? "None" : strerror(errno))
#define log_err(MSG, ...) ({ \
int __save = errno; \
@@ -73,13 +77,13 @@ int settimeo(int fd, int timeout_ms)
#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; })
-static int __start_server(int type, const struct sockaddr *addr,
+static int __start_server(int type, int protocol, const struct sockaddr *addr,
socklen_t addrlen, int timeout_ms, bool reuseport)
{
int on = 1;
int fd;
- fd = socket(addr->sa_family, type, 0);
+ fd = socket(addr->sa_family, type, protocol);
if (fd < 0) {
log_err("Failed to create server socket");
return -1;
@@ -113,8 +117,8 @@ error_close:
return -1;
}
-int start_server(int family, int type, const char *addr_str, __u16 port,
- int timeout_ms)
+static int start_server_proto(int family, int type, int protocol,
+ const char *addr_str, __u16 port, int timeout_ms)
{
struct sockaddr_storage addr;
socklen_t addrlen;
@@ -122,10 +126,23 @@ int start_server(int family, int type, const char *addr_str, __u16 port,
if (make_sockaddr(family, addr_str, port, &addr, &addrlen))
return -1;
- return __start_server(type, (struct sockaddr *)&addr,
+ return __start_server(type, protocol, (struct sockaddr *)&addr,
addrlen, timeout_ms, false);
}
+int start_server(int family, int type, const char *addr_str, __u16 port,
+ int timeout_ms)
+{
+ return start_server_proto(family, type, 0, addr_str, port, timeout_ms);
+}
+
+int start_mptcp_server(int family, const char *addr_str, __u16 port,
+ int timeout_ms)
+{
+ return start_server_proto(family, SOCK_STREAM, IPPROTO_MPTCP, addr_str,
+ port, timeout_ms);
+}
+
int *start_reuseport_server(int family, int type, const char *addr_str,
__u16 port, int timeout_ms, unsigned int nr_listens)
{
@@ -144,7 +161,7 @@ int *start_reuseport_server(int family, int type, const char *addr_str,
if (!fds)
return NULL;
- fds[0] = __start_server(type, (struct sockaddr *)&addr, addrlen,
+ fds[0] = __start_server(type, 0, (struct sockaddr *)&addr, addrlen,
timeout_ms, true);
if (fds[0] == -1)
goto close_fds;
@@ -154,7 +171,7 @@ int *start_reuseport_server(int family, int type, const char *addr_str,
goto close_fds;
for (; nr_fds < nr_listens; nr_fds++) {
- fds[nr_fds] = __start_server(type, (struct sockaddr *)&addr,
+ fds[nr_fds] = __start_server(type, 0, (struct sockaddr *)&addr,
addrlen, timeout_ms, true);
if (fds[nr_fds] == -1)
goto close_fds;
@@ -247,7 +264,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
struct sockaddr_storage addr;
struct sockaddr_in *addr_in;
socklen_t addrlen, optlen;
- int fd, type;
+ int fd, type, protocol;
if (!opts)
opts = &default_opts;
@@ -258,6 +275,11 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
return -1;
}
+ if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) {
+ log_err("getsockopt(SOL_PROTOCOL)");
+ return -1;
+ }
+
addrlen = sizeof(addr);
if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
log_err("Failed to get server addr");
@@ -265,7 +287,7 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts)
}
addr_in = (struct sockaddr_in *)&addr;
- fd = socket(addr_in->sin_family, type, 0);
+ fd = socket(addr_in->sin_family, type, protocol);
if (fd < 0) {
log_err("Failed to create client socket");
return -1;
@@ -414,7 +436,7 @@ struct nstoken *open_netns(const char *name)
int err;
struct nstoken *token;
- token = malloc(sizeof(struct nstoken));
+ token = calloc(1, sizeof(struct nstoken));
if (!ASSERT_OK_PTR(token, "malloc token"))
return NULL;
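
A minimal usage sketch of the new helper chain (demo_listeners() is hypothetical, not part of the patch): start_mptcp_server() is just start_server_proto() with SOCK_STREAM and IPPROTO_MPTCP fixed, so callers get an MPTCP listener with the same bind/listen/timeout handling as start_server(). This assumes a kernel built with CONFIG_MPTCP; otherwise socket() fails with EPROTONOSUPPORT.

        #include <unistd.h>
        #include <sys/socket.h>
        #include "network_helpers.h"

        static int demo_listeners(void)
        {
                /* port 0 lets the kernel pick free ports */
                int mptcp_fd = start_mptcp_server(AF_INET, "127.0.0.1", 0, 3000);
                int tcp_fd = start_server(AF_INET, SOCK_STREAM, "127.0.0.1", 0, 3000);

                if (mptcp_fd < 0 || tcp_fd < 0) {
                        if (mptcp_fd >= 0)
                                close(mptcp_fd);
                        if (tcp_fd >= 0)
                                close(tcp_fd);
                        return -1;
                }
                close(tcp_fd);
                close(mptcp_fd);
                return 0;
        }
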
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index a4b3b2f9877b..f882c691b790 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -42,6 +42,8 @@ extern struct ipv6_packet pkt_v6;
int settimeo(int fd, int timeout_ms);
int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
+int start_mptcp_server(int family, const char *addr, __u16 port,
+ int timeout_ms);
int *start_reuseport_server(int family, int type, const char *addr_str,
__u16 port, int timeout_ms,
unsigned int nr_listens);
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
new file mode 100644
index 000000000000..b17bfa0e0aac
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void init_test_filter_set(struct test_filter_set *set)
+{
+ set->cnt = 0;
+ set->tests = NULL;
+}
+
+static void free_test_filter_set(struct test_filter_set *set)
+{
+ int i, j;
+
+ for (i = 0; i < set->cnt; i++) {
+ for (j = 0; j < set->tests[i].subtest_cnt; j++)
+ free((void *)set->tests[i].subtests[j]);
+ free(set->tests[i].subtests);
+ free(set->tests[i].name);
+ }
+
+ free(set->tests);
+ init_test_filter_set(set);
+}
+
+static void test_parse_test_list(void)
+{
+ struct test_filter_set set;
+
+ init_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "test filters count"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie",
+ &set,
+ true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true),
+ "parsing");
+ ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing");
+ ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 3, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+error:
+ free_test_filter_set(&set);
+}
+
+void test_arg_parsing(void)
+{
+ if (test__start_subtest("test_parse_test_list"))
+ test_parse_test_list();
+}
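
Distilling the contract the last subcase pins down (this is my reading of the test above, not a separately documented guarantee): when the boolean argument is false, parse_test_list() treats each name as a substring filter and wraps it in '*' globs; when it is true, names are stored verbatim.

        #include "testing_helpers.h"

        struct test_filter_set set = {};

        /* stores set.tests[0].name == "*bpf_cookie*" and
         * set.tests[0].subtests[0] == "*trace*", so plain names
         * match as substrings
         */
        parse_test_list("bpf_cookie/trace", &set, false);
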
diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
index d48f6e533e1e..0b899d2d8ea7 100644
--- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
+++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
@@ -11,15 +11,30 @@ static void trigger_func(void)
asm volatile ("");
}
+/* attach point for byname uprobe */
+static void trigger_func2(void)
+{
+ asm volatile ("");
+}
+
+/* attach point for byname sleepable uprobe */
+static void trigger_func3(void)
+{
+ asm volatile ("");
+}
+
+static char test_data[] = "test_data";
+
void test_attach_probe(void)
{
DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
- int duration = 0;
struct bpf_link *kprobe_link, *kretprobe_link;
struct bpf_link *uprobe_link, *uretprobe_link;
struct test_attach_probe* skel;
ssize_t uprobe_offset, ref_ctr_offset;
+ struct bpf_link *uprobe_err_link;
bool legacy;
+ char *mem;
/* Check if new-style kprobe/uprobe API is supported.
* Kernels that support new FD-based kprobe and uprobe BPF attachment
@@ -42,12 +57,21 @@ void test_attach_probe(void)
if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
return;
- skel = test_attach_probe__open_and_load();
- if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ skel = test_attach_probe__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
- if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n"))
+
+ /* sleepable kprobe test case needs flags set before loading */
+ if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable,
+ BPF_F_SLEEPABLE), "kprobe_sleepable_flags"))
+ goto cleanup;
+
+ if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load"))
+ goto cleanup;
+ if (!ASSERT_OK_PTR(skel->bss, "check_bss"))
goto cleanup;
+ /* manual-attach kprobe/kretprobe */
kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
false /* retprobe */,
SYS_NANOSLEEP_KPROBE_NAME);
@@ -62,6 +86,13 @@ void test_attach_probe(void)
goto cleanup;
skel->links.handle_kretprobe = kretprobe_link;
+ /* auto-attachable kprobe and kretprobe */
+ skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto);
+ ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto");
+
+ skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto);
+ ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto");
+
if (!legacy)
ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");
@@ -90,26 +121,107 @@ void test_attach_probe(void)
goto cleanup;
skel->links.handle_uretprobe = uretprobe_link;
- /* trigger & validate kprobe && kretprobe */
- usleep(1);
+ /* verify auto-attach fails for old-style uprobe definition */
+ uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname);
+ if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP,
+ "auto-attach should fail for old-style name"))
+ goto cleanup;
- if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res",
- "wrong kprobe res: %d\n", skel->bss->kprobe_res))
+ uprobe_opts.func_name = "trigger_func2";
+ uprobe_opts.retprobe = false;
+ uprobe_opts.ref_ctr_offset = 0;
+ skel->links.handle_uprobe_byname =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname,
+ 0 /* this pid */,
+ "/proc/self/exe",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname"))
goto cleanup;
- if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res",
- "wrong kretprobe res: %d\n", skel->bss->kretprobe_res))
+
+ /* verify auto-attach works */
+ skel->links.handle_uretprobe_byname =
+ bpf_program__attach(skel->progs.handle_uretprobe_byname);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname"))
goto cleanup;
- /* trigger & validate uprobe & uretprobe */
- trigger_func();
+ /* test attach by name for a library function, using the library
+ * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo().
+ */
+ uprobe_opts.func_name = "malloc";
+ uprobe_opts.retprobe = false;
+ skel->links.handle_uprobe_byname2 =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
+ 0 /* this pid */,
+ "libc.so.6",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2"))
+ goto cleanup;
+
+ uprobe_opts.func_name = "free";
+ uprobe_opts.retprobe = true;
+ skel->links.handle_uretprobe_byname2 =
+ bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2,
+ -1 /* any pid */,
+ "libc.so.6",
+ 0, &uprobe_opts);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2"))
+ goto cleanup;
+
+ /* sleepable kprobes should not attach successfully */
+ skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable);
+ if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable"))
+ goto cleanup;
+
+ /* test sleepable uprobe and uretprobe variants */
+ skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable"))
+ goto cleanup;
+
+ skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3);
+ if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, "attach_uprobe_byname3"))
+ goto cleanup;
- if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",
- "wrong uprobe res: %d\n", skel->bss->uprobe_res))
+ skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable"))
goto cleanup;
- if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res",
- "wrong uretprobe res: %d\n", skel->bss->uretprobe_res))
+
+ skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3);
+ if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3"))
goto cleanup;
+ skel->bss->user_ptr = test_data;
+
+ /* trigger & validate kprobe && kretprobe */
+ usleep(1);
+
+ /* trigger & validate shared library u[ret]probes attached by name */
+ mem = malloc(1);
+ free(mem);
+
+ /* trigger & validate uprobe & uretprobe */
+ trigger_func();
+
+ /* trigger & validate uprobe attached by name */
+ trigger_func2();
+
+ /* trigger & validate sleepable uprobe attached by name */
+ trigger_func3();
+
+ ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
+ ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res");
+ ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
+ ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res");
+ ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res");
+ ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res");
+ ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res");
+ ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res");
+ ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res");
+ ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res");
+
cleanup:
test_attach_probe__destroy(skel);
ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
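
The by-name attach path these new checks exercise, distilled (attach_by_name() is a hypothetical wrapper; the libbpf calls are the ones used in the hunk above): setting bpf_uprobe_opts.func_name makes libbpf resolve the symbol in the target binary, so the file offset argument stays 0.

        #include <bpf/libbpf.h>

        static struct bpf_link *attach_by_name(struct bpf_program *prog)
        {
                LIBBPF_OPTS(bpf_uprobe_opts, uopts,
                        .func_name = "trigger_func2", /* resolved in the ELF symtab */
                        .retprobe = false,
                );

                return bpf_program__attach_uprobe_opts(prog, 0 /* this pid */,
                                                       "/proc/self/exe",
                                                       0 /* offset, unused with func_name */,
                                                       &uopts);
        }
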
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
index 923a6139b2d8..2974b44f80fa 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
@@ -4,8 +4,11 @@
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
+#include <sys/mman.h>
#include <unistd.h>
#include <test_progs.h>
+#include <network_helpers.h>
+#include <bpf/btf.h>
#include "test_bpf_cookie.skel.h"
#include "kprobe_multi.skel.h"
@@ -118,24 +121,24 @@ static void kprobe_multi_link_api_subtest(void)
})
GET_ADDR("bpf_fentry_test1", addrs[0]);
- GET_ADDR("bpf_fentry_test2", addrs[1]);
- GET_ADDR("bpf_fentry_test3", addrs[2]);
- GET_ADDR("bpf_fentry_test4", addrs[3]);
- GET_ADDR("bpf_fentry_test5", addrs[4]);
- GET_ADDR("bpf_fentry_test6", addrs[5]);
- GET_ADDR("bpf_fentry_test7", addrs[6]);
+ GET_ADDR("bpf_fentry_test3", addrs[1]);
+ GET_ADDR("bpf_fentry_test4", addrs[2]);
+ GET_ADDR("bpf_fentry_test5", addrs[3]);
+ GET_ADDR("bpf_fentry_test6", addrs[4]);
+ GET_ADDR("bpf_fentry_test7", addrs[5]);
+ GET_ADDR("bpf_fentry_test2", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
#undef GET_ADDR
- cookies[0] = 1;
- cookies[1] = 2;
- cookies[2] = 3;
- cookies[3] = 4;
- cookies[4] = 5;
- cookies[5] = 6;
- cookies[6] = 7;
- cookies[7] = 8;
+ cookies[0] = 1; /* bpf_fentry_test1 */
+ cookies[1] = 2; /* bpf_fentry_test3 */
+ cookies[2] = 3; /* bpf_fentry_test4 */
+ cookies[3] = 4; /* bpf_fentry_test5 */
+ cookies[4] = 5; /* bpf_fentry_test6 */
+ cookies[5] = 6; /* bpf_fentry_test7 */
+ cookies[6] = 7; /* bpf_fentry_test2 */
+ cookies[7] = 8; /* bpf_fentry_test8 */
opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
@@ -146,14 +149,14 @@ static void kprobe_multi_link_api_subtest(void)
if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
goto cleanup;
- cookies[0] = 8;
- cookies[1] = 7;
- cookies[2] = 6;
- cookies[3] = 5;
- cookies[4] = 4;
- cookies[5] = 3;
- cookies[6] = 2;
- cookies[7] = 1;
+ cookies[0] = 8; /* bpf_fentry_test1 */
+ cookies[1] = 7; /* bpf_fentry_test3 */
+ cookies[2] = 6; /* bpf_fentry_test4 */
+ cookies[3] = 5; /* bpf_fentry_test5 */
+ cookies[4] = 4; /* bpf_fentry_test6 */
+ cookies[5] = 3; /* bpf_fentry_test7 */
+ cookies[6] = 2; /* bpf_fentry_test2 */
+ cookies[7] = 1; /* bpf_fentry_test8 */
opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
@@ -178,12 +181,12 @@ static void kprobe_multi_attach_api_subtest(void)
struct kprobe_multi *skel = NULL;
const char *syms[8] = {
"bpf_fentry_test1",
- "bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
+ "bpf_fentry_test2",
"bpf_fentry_test8",
};
__u64 cookies[8];
@@ -195,14 +198,14 @@ static void kprobe_multi_attach_api_subtest(void)
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
- cookies[0] = 1;
- cookies[1] = 2;
- cookies[2] = 3;
- cookies[3] = 4;
- cookies[4] = 5;
- cookies[5] = 6;
- cookies[6] = 7;
- cookies[7] = 8;
+ cookies[0] = 1; /* bpf_fentry_test1 */
+ cookies[1] = 2; /* bpf_fentry_test3 */
+ cookies[2] = 3; /* bpf_fentry_test4 */
+ cookies[3] = 4; /* bpf_fentry_test5 */
+ cookies[4] = 5; /* bpf_fentry_test6 */
+ cookies[5] = 6; /* bpf_fentry_test7 */
+ cookies[6] = 7; /* bpf_fentry_test2 */
+ cookies[7] = 8; /* bpf_fentry_test8 */
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
@@ -213,14 +216,14 @@ static void kprobe_multi_attach_api_subtest(void)
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
- cookies[0] = 8;
- cookies[1] = 7;
- cookies[2] = 6;
- cookies[3] = 5;
- cookies[4] = 4;
- cookies[5] = 3;
- cookies[6] = 2;
- cookies[7] = 1;
+ cookies[0] = 8; /* bpf_fentry_test1 */
+ cookies[1] = 7; /* bpf_fentry_test3 */
+ cookies[2] = 6; /* bpf_fentry_test4 */
+ cookies[3] = 5; /* bpf_fentry_test5 */
+ cookies[4] = 4; /* bpf_fentry_test6 */
+ cookies[5] = 3; /* bpf_fentry_test7 */
+ cookies[6] = 2; /* bpf_fentry_test2 */
+ cookies[7] = 1; /* bpf_fentry_test8 */
opts.retprobe = true;
@@ -410,6 +413,88 @@ cleanup:
bpf_link__destroy(link);
}
+static void tracing_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int prog_fd;
+ int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+
+ skel->bss->fentry_res = 0;
+ skel->bss->fexit_res = 0;
+
+ cookie = 0x10000000000000L;
+ prog_fd = bpf_program__fd(skel->progs.fentry_test1);
+ link_opts.tracing.cookie = cookie;
+ fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts);
+ if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create"))
+ goto cleanup;
+
+ cookie = 0x20000000000000L;
+ prog_fd = bpf_program__fd(skel->progs.fexit_test1);
+ link_opts.tracing.cookie = cookie;
+ fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts);
+ if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create"))
+ goto cleanup;
+
+ cookie = 0x30000000000000L;
+ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+ link_opts.tracing.cookie = cookie;
+ fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts);
+ if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create"))
+ goto cleanup;
+
+ prog_fd = bpf_program__fd(skel->progs.fentry_test1);
+ bpf_prog_test_run_opts(prog_fd, &opts);
+
+ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
+ bpf_prog_test_run_opts(prog_fd, &opts);
+
+ ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res");
+ ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res");
+ ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res");
+
+cleanup:
+ if (fentry_fd >= 0)
+ close(fentry_fd);
+ if (fexit_fd >= 0)
+ close(fexit_fd);
+ if (fmod_ret_fd >= 0)
+ close(fmod_ret_fd);
+}
+
+int stack_mprotect(void);
+
+static void lsm_subtest(struct test_bpf_cookie *skel)
+{
+ __u64 cookie;
+ int prog_fd;
+ int lsm_fd = -1;
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+
+ skel->bss->lsm_res = 0;
+
+ cookie = 0x90000000000090L;
+ prog_fd = bpf_program__fd(skel->progs.test_int_hook);
+ link_opts.tracing.cookie = cookie;
+ lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts);
+ if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create"))
+ goto cleanup;
+
+ stack_mprotect();
+ if (!ASSERT_EQ(errno, EPERM, "stack_mprotect"))
+ goto cleanup;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "lsm_res");
+
+cleanup:
+ if (lsm_fd >= 0)
+ close(lsm_fd);
+}
+
void test_bpf_cookie(void)
{
struct test_bpf_cookie *skel;
@@ -432,6 +517,10 @@ void test_bpf_cookie(void)
tp_subtest(skel);
if (test__start_subtest("perf_event"))
pe_subtest(skel);
+ if (test__start_subtest("trampoline"))
+ tracing_subtest(skel);
+ if (test__start_subtest("lsm"))
+ lsm_subtest(skel);
test_bpf_cookie__destroy(skel);
}
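
For context, a sketch of the BPF-side counterpart (the real programs live in progs/test_bpf_cookie.c; this standalone version assumes the usual vmlinux.h setup): the value passed in link_opts.tracing.cookie is read back with bpf_get_attach_cookie(), which this series extends to fentry/fexit/fmod_ret and LSM programs.

        #include "vmlinux.h"
        #include <bpf/bpf_helpers.h>
        #include <bpf/bpf_tracing.h>

        __u64 fentry_res;

        SEC("fentry/bpf_fentry_test1")
        int BPF_PROG(fentry_test1, int a)
        {
                /* returns the cookie set at link creation time */
                fentry_res = bpf_get_attach_cookie(ctx);
                return 0;
        }

        char LICENSE[] SEC("license") = "GPL";
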
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 5142a7d130b2..a33874b081b6 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -26,6 +26,8 @@
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
+#include "bpf_iter_bpf_link.skel.h"
+#include "bpf_iter_ksym.skel.h"
static int duration;
@@ -34,8 +36,7 @@ static void test_btf_id_or_null(void)
struct bpf_iter_test_kern3 *skel;
skel = bpf_iter_test_kern3__open_and_load();
- if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
- "skeleton open_and_load unexpectedly succeeded\n")) {
+ if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
bpf_iter_test_kern3__destroy(skel);
return;
}
@@ -52,7 +53,7 @@ static void do_dummy_read(struct bpf_program *prog)
return;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* not check contents, but ensure read() ends without error */
@@ -87,8 +88,7 @@ static void test_ipv6_route(void)
struct bpf_iter_ipv6_route *skel;
skel = bpf_iter_ipv6_route__open_and_load();
- if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
return;
do_dummy_read(skel->progs.dump_ipv6_route);
@@ -101,8 +101,7 @@ static void test_netlink(void)
struct bpf_iter_netlink *skel;
skel = bpf_iter_netlink__open_and_load();
- if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
return;
do_dummy_read(skel->progs.dump_netlink);
@@ -115,8 +114,7 @@ static void test_bpf_map(void)
struct bpf_iter_bpf_map *skel;
skel = bpf_iter_bpf_map__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
return;
do_dummy_read(skel->progs.dump_bpf_map);
@@ -129,8 +127,7 @@ static void test_task(void)
struct bpf_iter_task *skel;
skel = bpf_iter_task__open_and_load();
- if (CHECK(!skel, "bpf_iter_task__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
return;
do_dummy_read(skel->progs.dump_task);
@@ -161,8 +158,7 @@ static void test_task_stack(void)
struct bpf_iter_task_stack *skel;
skel = bpf_iter_task_stack__open_and_load();
- if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
return;
do_dummy_read(skel->progs.dump_task_stack);
@@ -183,24 +179,22 @@ static void test_task_file(void)
void *ret;
skel = bpf_iter_task_file__open_and_load();
- if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
return;
skel->bss->tgid = getpid();
- if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
- "pthread_create", "pthread_create failed\n"))
+ if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
+ "pthread_create"))
goto done;
do_dummy_read(skel->progs.dump_task_file);
- if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
- "pthread_join", "pthread_join failed\n"))
+ if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
+ "pthread_join"))
goto done;
- CHECK(skel->bss->count != 0, "check_count",
- "invalid non pthread file visit count %d\n", skel->bss->count);
+ ASSERT_EQ(skel->bss->count, 0, "check_count");
done:
bpf_iter_task_file__destroy(skel);
@@ -224,7 +218,7 @@ static int do_btf_read(struct bpf_iter_task_btf *skel)
return ret;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
@@ -238,9 +232,8 @@ static int do_btf_read(struct bpf_iter_task_btf *skel)
if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
goto free_link;
- CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
- "check for btf representation of task_struct in iter data",
- "struct task_struct not found");
+ ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
+ "check for btf representation of task_struct in iter data");
free_link:
if (iter_fd > 0)
close(iter_fd);
@@ -255,8 +248,7 @@ static void test_task_btf(void)
int ret;
skel = bpf_iter_task_btf__open_and_load();
- if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
return;
bss = skel->bss;
@@ -265,12 +257,10 @@ static void test_task_btf(void)
if (ret)
goto cleanup;
- if (CHECK(bss->tasks == 0, "check if iterated over tasks",
- "no task iteration, did BPF program run?\n"))
+ if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
goto cleanup;
- CHECK(bss->seq_err != 0, "check for unexpected err",
- "bpf_seq_printf_btf returned %ld", bss->seq_err);
+ ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");
cleanup:
bpf_iter_task_btf__destroy(skel);
@@ -281,8 +271,7 @@ static void test_tcp4(void)
struct bpf_iter_tcp4 *skel;
skel = bpf_iter_tcp4__open_and_load();
- if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
return;
do_dummy_read(skel->progs.dump_tcp4);
@@ -295,8 +284,7 @@ static void test_tcp6(void)
struct bpf_iter_tcp6 *skel;
skel = bpf_iter_tcp6__open_and_load();
- if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
return;
do_dummy_read(skel->progs.dump_tcp6);
@@ -309,8 +297,7 @@ static void test_udp4(void)
struct bpf_iter_udp4 *skel;
skel = bpf_iter_udp4__open_and_load();
- if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
return;
do_dummy_read(skel->progs.dump_udp4);
@@ -323,8 +310,7 @@ static void test_udp6(void)
struct bpf_iter_udp6 *skel;
skel = bpf_iter_udp6__open_and_load();
- if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
return;
do_dummy_read(skel->progs.dump_udp6);
@@ -349,7 +335,7 @@ static void test_unix(void)
static int do_read_with_fd(int iter_fd, const char *expected,
bool read_one_char)
{
- int err = -1, len, read_buf_len, start;
+ int len, read_buf_len, start;
char buf[16] = {};
read_buf_len = read_one_char ? 1 : 16;
@@ -363,9 +349,7 @@ static int do_read_with_fd(int iter_fd, const char *expected,
if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
return -1;
- err = strcmp(buf, expected);
- if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
- buf, expected))
+ if (!ASSERT_STREQ(buf, expected, "read"))
return -1;
return 0;
@@ -378,19 +362,17 @@ static void test_anon_iter(bool read_one_char)
int iter_fd, err;
skel = bpf_iter_test_kern1__open_and_load();
- if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
return;
err = bpf_iter_test_kern1__attach(skel);
- if (CHECK(err, "bpf_iter_test_kern1__attach",
- "skeleton attach failed\n")) {
+ if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) {
goto out;
}
link = skel->links.dump_task;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto out;
do_read_with_fd(iter_fd, "abcd", read_one_char);
@@ -423,8 +405,7 @@ static void test_file_iter(void)
int err;
skel1 = bpf_iter_test_kern1__open_and_load();
- if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
return;
link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
@@ -447,12 +428,11 @@ static void test_file_iter(void)
* should change.
*/
skel2 = bpf_iter_test_kern2__open_and_load();
- if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
goto unlink_path;
err = bpf_link__update_program(link, skel2->progs.dump_task);
- if (CHECK(err, "update_prog", "update_prog failed\n"))
+ if (!ASSERT_OK(err, "update_prog"))
goto destroy_skel2;
do_read(path, "ABCD");
@@ -478,8 +458,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
char *buf;
skel = bpf_iter_test_kern4__open();
- if (CHECK(!skel, "bpf_iter_test_kern4__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
return;
/* create two maps: bpf program will only do bpf_seq_write
@@ -515,8 +494,8 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
}
skel->rodata->ret1 = ret1;
- if (CHECK(bpf_iter_test_kern4__load(skel),
- "bpf_iter_test_kern4__load", "skeleton load failed\n"))
+ if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
+ "bpf_iter_test_kern4__load"))
goto free_map2;
/* setup filtering map_id in bpf program */
@@ -538,7 +517,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
goto free_map2;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
buf = malloc(expected_read_len);
@@ -574,22 +553,16 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
goto free_buf;
}
- if (CHECK(total_read_len != expected_read_len, "read",
- "total len %u, expected len %u\n", total_read_len,
- expected_read_len))
+ if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
goto free_buf;
- if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
- "expected 1 actual %d\n", skel->bss->map1_accessed))
+ if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
goto free_buf;
- if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
- "expected 2 actual %d\n", skel->bss->map2_accessed))
+ if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
goto free_buf;
- CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
- "map2_seqnum", "two different seqnum %lld %lld\n",
- skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);
+ ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");
free_buf:
free(buf);
@@ -622,15 +595,13 @@ static void test_bpf_hash_map(void)
char buf[64];
skel = bpf_iter_bpf_hash_map__open();
- if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
return;
skel->bss->in_test_mode = true;
err = bpf_iter_bpf_hash_map__load(skel);
- if (CHECK(!skel, "bpf_iter_bpf_hash_map__load",
- "skeleton load failed\n"))
+ if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
goto out;
/* iterator with hashmap2 and hashmap3 should fail */
@@ -659,7 +630,7 @@ static void test_bpf_hash_map(void)
expected_val += val;
err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -669,7 +640,7 @@ static void test_bpf_hash_map(void)
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
@@ -679,17 +650,11 @@ static void test_bpf_hash_map(void)
goto close_iter;
/* test results */
- if (CHECK(skel->bss->key_sum_a != expected_key_a,
- "key_sum_a", "got %u expected %u\n",
- skel->bss->key_sum_a, expected_key_a))
+ if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
goto close_iter;
- if (CHECK(skel->bss->key_sum_b != expected_key_b,
- "key_sum_b", "got %u expected %u\n",
- skel->bss->key_sum_b, expected_key_b))
+ if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %llu expected %llu\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
@@ -718,16 +683,14 @@ static void test_bpf_percpu_hash_map(void)
void *val;
skel = bpf_iter_bpf_percpu_hash_map__open();
- if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_hash_map__load(skel);
- if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
- "skeleton load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load"))
goto out;
/* update map values here */
@@ -745,7 +708,7 @@ static void test_bpf_percpu_hash_map(void)
}
err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -758,7 +721,7 @@ static void test_bpf_percpu_hash_map(void)
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
@@ -768,17 +731,11 @@ static void test_bpf_percpu_hash_map(void)
goto close_iter;
/* test results */
- if (CHECK(skel->bss->key_sum_a != expected_key_a,
- "key_sum_a", "got %u expected %u\n",
- skel->bss->key_sum_a, expected_key_a))
+ if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
goto close_iter;
- if (CHECK(skel->bss->key_sum_b != expected_key_b,
- "key_sum_b", "got %u expected %u\n",
- skel->bss->key_sum_b, expected_key_b))
+ if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %u expected %u\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
@@ -803,8 +760,7 @@ static void test_bpf_array_map(void)
int len, start;
skel = bpf_iter_bpf_array_map__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.arraymap1);
@@ -817,7 +773,7 @@ static void test_bpf_array_map(void)
first_val = val;
err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -830,7 +786,7 @@ static void test_bpf_array_map(void)
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
@@ -850,21 +806,16 @@ static void test_bpf_array_map(void)
res_first_key, res_first_val, first_val))
goto close_iter;
- if (CHECK(skel->bss->key_sum != expected_key,
- "key_sum", "got %u expected %u\n",
- skel->bss->key_sum, expected_key))
+ if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %llu expected %llu\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
err = bpf_map_lookup_elem(map_fd, &i, &val);
- if (CHECK(err, "map_lookup", "map_lookup failed\n"))
+ if (!ASSERT_OK(err, "map_lookup"))
goto out;
- if (CHECK(i != val, "invalid_val",
- "got value %llu expected %u\n", val, i))
+ if (!ASSERT_EQ(i, val, "invalid_val"))
goto out;
}
@@ -889,16 +840,14 @@ static void test_bpf_percpu_array_map(void)
int len;
skel = bpf_iter_bpf_percpu_array_map__open();
- if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
- "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_array_map__load(skel);
- if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
- "skeleton load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load"))
goto out;
/* update map values here */
@@ -912,7 +861,7 @@ static void test_bpf_percpu_array_map(void)
}
err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -925,7 +874,7 @@ static void test_bpf_percpu_array_map(void)
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
@@ -935,13 +884,9 @@ static void test_bpf_percpu_array_map(void)
goto close_iter;
/* test results */
- if (CHECK(skel->bss->key_sum != expected_key,
- "key_sum", "got %u expected %u\n",
- skel->bss->key_sum, expected_key))
+ if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %u expected %u\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
@@ -966,17 +911,16 @@ static void test_bpf_sk_storage_delete(void)
char buf[64];
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
memset(&linfo, 0, sizeof(linfo));
@@ -989,7 +933,7 @@ static void test_bpf_sk_storage_delete(void)
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
@@ -1027,22 +971,21 @@ static void test_bpf_sk_storage_get(void)
int sock_fd = -1;
skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
return;
sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
+ if (!ASSERT_GE(sock_fd, 0, "socket"))
goto out;
err = listen(sock_fd, 1);
- if (CHECK(err != 0, "listen", "errno: %d\n", errno))
+ if (!ASSERT_OK(err, "listen"))
goto close_socket;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
- if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
goto close_socket;
do_dummy_read(skel->progs.fill_socket_owner);
@@ -1078,15 +1021,14 @@ static void test_bpf_sk_storage_map(void)
char buf[64];
skel = bpf_iter_bpf_sk_storage_map__open_and_load();
- if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
return;
map_fd = bpf_map__fd(skel->maps.sk_stg_map);
num_sockets = ARRAY_SIZE(sock_fd);
for (i = 0; i < num_sockets; i++) {
sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
- if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
+ if (!ASSERT_GE(sock_fd[i], 0, "socket"))
goto out;
val = i + 1;
@@ -1094,7 +1036,7 @@ static void test_bpf_sk_storage_map(void)
err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
BPF_NOEXIST);
- if (CHECK(err, "map_update", "map_update failed\n"))
+ if (!ASSERT_OK(err, "map_update"))
goto out;
}
@@ -1107,7 +1049,7 @@ static void test_bpf_sk_storage_map(void)
goto out;
iter_fd = bpf_iter_create(bpf_link__fd(link));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto free_link;
/* do some tests */
@@ -1117,14 +1059,10 @@ static void test_bpf_sk_storage_map(void)
goto close_iter;
/* test results */
- if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
- "ipv6_sk_count", "got %u expected %u\n",
- skel->bss->ipv6_sk_count, num_sockets))
+ if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
goto close_iter;
- if (CHECK(skel->bss->val_sum != expected_val,
- "val_sum", "got %u expected %u\n",
- skel->bss->val_sum, expected_val))
+ if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
goto close_iter;
close_iter:
@@ -1147,8 +1085,7 @@ static void test_rdonly_buf_out_of_bound(void)
struct bpf_link *link;
skel = bpf_iter_test_kern5__open_and_load();
- if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
- "skeleton open_and_load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
return;
memset(&linfo, 0, sizeof(linfo));
@@ -1167,11 +1104,36 @@ static void test_buf_neg_offset(void)
struct bpf_iter_test_kern6 *skel;
skel = bpf_iter_test_kern6__open_and_load();
- if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
- "skeleton open_and_load unexpected success\n"))
+ if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
bpf_iter_test_kern6__destroy(skel);
}
+static void test_link_iter(void)
+{
+ struct bpf_iter_bpf_link *skel;
+
+ skel = bpf_iter_bpf_link__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
+ return;
+
+ do_dummy_read(skel->progs.dump_bpf_link);
+
+ bpf_iter_bpf_link__destroy(skel);
+}
+
+static void test_ksym_iter(void)
+{
+ struct bpf_iter_ksym *skel;
+
+ skel = bpf_iter_ksym__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
+ return;
+
+ do_dummy_read(skel->progs.dump_ksym);
+
+ bpf_iter_ksym__destroy(skel);
+}
+
#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];
@@ -1192,8 +1154,6 @@ static void str_strip_first_line(char *str)
*dst = '\0';
}
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
static void test_task_vma(void)
{
int err, iter_fd = -1, proc_maps_fd = -1;
@@ -1202,13 +1162,13 @@ static void test_task_vma(void)
char maps_path[64];
skel = bpf_iter_task_vma__open();
- if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
return;
skel->bss->pid = getpid();
err = bpf_iter_task_vma__load(skel);
- if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n"))
+ if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
goto out;
skel->links.proc_maps = bpf_program__attach_iter(
@@ -1220,7 +1180,7 @@ static void test_task_vma(void)
}
iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
- if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ if (!ASSERT_GE(iter_fd, 0, "create_iter"))
goto out;
/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
@@ -1229,10 +1189,10 @@ static void test_task_vma(void)
len = 0;
while (len < CMP_BUFFER_SIZE) {
err = read_fd_into_buffer(iter_fd, task_vma_output + len,
- min(read_size, CMP_BUFFER_SIZE - len));
+ MIN(read_size, CMP_BUFFER_SIZE - len));
if (!err)
break;
- if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n"))
+ if (!ASSERT_GE(err, 0, "read_iter_fd"))
goto out;
len += err;
}
@@ -1240,18 +1200,17 @@ static void test_task_vma(void)
/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
proc_maps_fd = open(maps_path, O_RDONLY);
- if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n"))
+ if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
goto out;
err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
- if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n"))
+ if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
goto out;
/* strip and compare the first line of the two files */
str_strip_first_line(task_vma_output);
str_strip_first_line(proc_maps_output);
- CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output",
- "found mismatch\n");
+ ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");
out:
close(proc_maps_fd);
close(iter_fd);
@@ -1320,4 +1279,8 @@ void test_bpf_iter(void)
test_rdonly_buf_out_of_bound();
if (test__start_subtest("buf-neg-offset"))
test_buf_neg_offset();
+ if (test__start_subtest("link-iter"))
+ test_link_iter();
+ if (test__start_subtest("ksym"))
+ test_ksym_iter();
}
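
The conversions above all keep the same read-and-discard pattern; distilled here for orientation (drain_iter() is a hypothetical helper, and the NULL-on-error check assumes libbpf 1.0 strict-mode semantics):

        #include <unistd.h>
        #include <bpf/bpf.h>
        #include <bpf/libbpf.h>

        static int drain_iter(struct bpf_program *prog)
        {
                struct bpf_link *link;
                char buf[16];
                int iter_fd, len = -1;

                link = bpf_program__attach_iter(prog, NULL);
                if (!link)
                        return -1;

                iter_fd = bpf_iter_create(bpf_link__fd(link));
                if (iter_fd < 0)
                        goto out;

                /* contents are ignored; only a clean EOF (len == 0) matters */
                while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
                        ;

                close(iter_fd);
        out:
                bpf_link__destroy(link);
                return len; /* 0 on clean EOF, negative on error */
        }
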
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_loop.c b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c
index 380d7a2072e3..4cd8a25afe68 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_loop.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_loop.c
@@ -120,6 +120,64 @@ static void check_nested_calls(struct bpf_loop *skel)
bpf_link__destroy(link);
}
+static void check_non_constant_callback(struct bpf_loop *skel)
+{
+ struct bpf_link *link =
+ bpf_program__attach(skel->progs.prog_non_constant_callback);
+
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ skel->bss->callback_selector = 0x0F;
+ usleep(1);
+ ASSERT_EQ(skel->bss->g_output, 0x0F, "g_output #1");
+
+ skel->bss->callback_selector = 0xF0;
+ usleep(1);
+ ASSERT_EQ(skel->bss->g_output, 0xF0, "g_output #2");
+
+ bpf_link__destroy(link);
+}
+
+static void check_stack(struct bpf_loop *skel)
+{
+ struct bpf_link *link = bpf_program__attach(skel->progs.stack_check);
+ const int max_key = 12;
+ int key;
+ int map_fd;
+
+ if (!ASSERT_OK_PTR(link, "link"))
+ return;
+
+ map_fd = bpf_map__fd(skel->maps.map1);
+
+ if (!ASSERT_GE(map_fd, 0, "bpf_map__fd"))
+ goto out;
+
+ for (key = 1; key <= max_key; ++key) {
+ int val = key;
+ int err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
+
+ if (!ASSERT_OK(err, "bpf_map_update_elem"))
+ goto out;
+ }
+
+ usleep(1);
+
+ for (key = 1; key <= max_key; ++key) {
+ int val;
+ int err = bpf_map_lookup_elem(map_fd, &key, &val);
+
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ goto out;
+ if (!ASSERT_EQ(val, key + 1, "bad value in the map"))
+ goto out;
+ }
+
+out:
+ bpf_link__destroy(link);
+}
+
void test_bpf_loop(void)
{
struct bpf_loop *skel;
@@ -140,6 +198,10 @@ void test_bpf_loop(void)
check_invalid_flags(skel);
if (test__start_subtest("check_nested_calls"))
check_nested_calls(skel);
+ if (test__start_subtest("check_non_constant_callback"))
+ check_non_constant_callback(skel);
+ if (test__start_subtest("check_stack"))
+ check_stack(skel);
bpf_loop__destroy(skel);
}
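
A generic bpf_loop() usage sketch for orientation (not the test's actual program, which lives in progs/bpf_loop.c): the callback returns 0 to continue or 1 to break, and check_non_constant_callback above verifies that the callback pointer itself may now be chosen at runtime.

        #include "vmlinux.h"
        #include <bpf/bpf_helpers.h>

        static int count_cb(__u32 index, void *ctx)
        {
                __u64 *sum = ctx;

                *sum += index;
                return 0; /* continue looping */
        }

        SEC("tp/syscalls/sys_enter_nanosleep")
        int sum_indices(void *ctx)
        {
                __u64 sum = 0;

                bpf_loop(10, count_cb, &sum, 0 /* flags */);
                return 0;
        }

        char LICENSE[] SEC("license") = "GPL";
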
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
index d43f548c572c..a4d0cc9d3367 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
@@ -36,13 +36,13 @@ struct test_config {
void (*bpf_destroy)(void *);
};
-enum test_state {
+enum bpf_test_state {
_TS_INVALID,
TS_MODULE_LOAD,
TS_MODULE_LOAD_FAIL,
};
-static _Atomic enum test_state state = _TS_INVALID;
+static _Atomic enum bpf_test_state state = _TS_INVALID;
static int sys_finit_module(int fd, const char *param_values, int flags)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
index dd30b1e3a67c..7a74a1579076 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c
@@ -2,13 +2,29 @@
#include <test_progs.h>
#include <network_helpers.h>
#include "test_bpf_nf.skel.h"
+#include "test_bpf_nf_fail.skel.h"
+
+static char log_buf[1024 * 1024];
+
+struct {
+ const char *prog_name;
+ const char *err_msg;
+} test_bpf_nf_fail_tests[] = {
+ { "alloc_release", "kernel function bpf_ct_release args#0 expected pointer to STRUCT nf_conn but" },
+ { "insert_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "lookup_insert", "kernel function bpf_ct_insert_entry args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "set_timeout_after_insert", "kernel function bpf_ct_set_timeout args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "set_status_after_insert", "kernel function bpf_ct_set_status args#0 expected pointer to STRUCT nf_conn___init but" },
+ { "change_timeout_after_alloc", "kernel function bpf_ct_change_timeout args#0 expected pointer to STRUCT nf_conn but" },
+ { "change_status_after_alloc", "kernel function bpf_ct_change_status args#0 expected pointer to STRUCT nf_conn but" },
+};
enum {
TEST_XDP,
TEST_TC_BPF,
};
-void test_bpf_nf_ct(int mode)
+static void test_bpf_nf_ct(int mode)
{
struct test_bpf_nf *skel;
int prog_fd, err;
@@ -39,14 +55,60 @@ void test_bpf_nf_ct(int mode)
ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id");
ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup");
ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple");
+ ASSERT_EQ(skel->data->test_alloc_entry, 0, "Test for alloc new entry");
+ ASSERT_EQ(skel->data->test_insert_entry, 0, "Test for insert new entry");
+ ASSERT_EQ(skel->data->test_succ_lookup, 0, "Test for successful lookup");
+ /* allow some tolerance for test_delta_timeout value to avoid races. */
+ ASSERT_GT(skel->bss->test_delta_timeout, 8, "Test for min ct timeout update");
+ ASSERT_LE(skel->bss->test_delta_timeout, 10, "Test for max ct timeout update");
+ /* expected status is IPS_SEEN_REPLY */
+ ASSERT_EQ(skel->bss->test_status, 2, "Test for ct status update");
end:
test_bpf_nf__destroy(skel);
}
+static void test_bpf_nf_ct_fail(const char *prog_name, const char *err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct test_bpf_nf_fail *skel;
+ struct bpf_program *prog;
+ int ret;
+
+ skel = test_bpf_nf_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "test_bpf_nf_fail__open"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto end;
+
+ bpf_program__set_autoload(prog, true);
+
+ ret = test_bpf_nf_fail__load(skel);
+ if (!ASSERT_ERR(ret, "test_bpf_nf_fail__load must fail"))
+ goto end;
+
+ if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
+ fprintf(stderr, "Expected: %s\n", err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+
+end:
+ test_bpf_nf_fail__destroy(skel);
+}
+
void test_bpf_nf(void)
{
+ int i;
if (test__start_subtest("xdp-ct"))
test_bpf_nf_ct(TEST_XDP);
if (test__start_subtest("tc-bpf-ct"))
test_bpf_nf_ct(TEST_TC_BPF);
+ for (i = 0; i < ARRAY_SIZE(test_bpf_nf_fail_tests); i++) {
+ if (test__start_subtest(test_bpf_nf_fail_tests[i].prog_name))
+ test_bpf_nf_ct_fail(test_bpf_nf_fail_tests[i].prog_name,
+ test_bpf_nf_fail_tests[i].err_msg);
+ }
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 8f7a1cef7d87..2959a52ced06 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -9,8 +9,9 @@
#include "bpf_cubic.skel.h"
#include "bpf_tcp_nogpl.skel.h"
#include "bpf_dctcp_release.skel.h"
-
-#define min(a, b) ((a) < (b) ? (a) : (b))
+#include "tcp_ca_write_sk_pacing.skel.h"
+#include "tcp_ca_incompl_cong_ops.skel.h"
+#include "tcp_ca_unsupp_cong_op.skel.h"
#ifndef ENOTSUPP
#define ENOTSUPP 524
@@ -53,7 +54,7 @@ static void *server(void *arg)
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_sent = send(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
+ MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_sent == -1 && errno == EINTR)
continue;
if (nr_sent == -1) {
@@ -146,7 +147,7 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
/* recv total_bytes */
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_recv = recv(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
+ MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_recv == -1 && errno == EINTR)
continue;
if (nr_recv == -1)
@@ -324,6 +325,58 @@ static void test_rel_setsockopt(void)
bpf_dctcp_release__destroy(rel_skel);
}
+static void test_write_sk_pacing(void)
+{
+ struct tcp_ca_write_sk_pacing *skel;
+ struct bpf_link *link;
+
+ skel = tcp_ca_write_sk_pacing__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
+ ASSERT_OK_PTR(link, "attach_struct_ops");
+
+ bpf_link__destroy(link);
+ tcp_ca_write_sk_pacing__destroy(skel);
+}
+
+static void test_incompl_cong_ops(void)
+{
+ struct tcp_ca_incompl_cong_ops *skel;
+ struct bpf_link *link;
+
+ skel = tcp_ca_incompl_cong_ops__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ return;
+
+ /* The missing cong_avoid() and cong_control() are only reported at
+ * this point, when the struct_ops map is attached:
+ */
+ link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);
+ ASSERT_ERR_PTR(link, "attach_struct_ops");
+
+ bpf_link__destroy(link);
+ tcp_ca_incompl_cong_ops__destroy(skel);
+}
+
+static void test_unsupp_cong_op(void)
+{
+ libbpf_print_fn_t old_print_fn;
+ struct tcp_ca_unsupp_cong_op *skel;
+
+ err_str = "attach to unsupported member get_info";
+ found = false;
+ old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+ skel = tcp_ca_unsupp_cong_op__open_and_load();
+ ASSERT_NULL(skel, "open_and_load");
+ ASSERT_EQ(found, true, "expected_err_msg");
+
+ tcp_ca_unsupp_cong_op__destroy(skel);
+ libbpf_set_print(old_print_fn);
+}
+
void test_bpf_tcp_ca(void)
{
if (test__start_subtest("dctcp"))
@@ -336,4 +389,10 @@ void test_bpf_tcp_ca(void)
test_dctcp_fallback();
if (test__start_subtest("rel_setsockopt"))
test_rel_setsockopt();
+ if (test__start_subtest("write_sk_pacing"))
+ test_write_sk_pacing();
+ if (test__start_subtest("incompl_cong_ops"))
+ test_incompl_cong_ops();
+ if (test__start_subtest("unsupp_cong_op"))
+ test_unsupp_cong_op();
}
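
The two negative subtests rely on a struct_ops property worth spelling out (try_attach() is a hypothetical wrapper; the NULL-on-error check assumes libbpf 1.0 semantics): an incomplete cong_ops loads fine and is only rejected when the map is attached, which is why the ASSERT_ERR_PTR above sits on the attach call rather than on open_and_load.

        #include <bpf/libbpf.h>

        static void try_attach(struct bpf_map *ops_map)
        {
                struct bpf_link *link;

                link = bpf_map__attach_struct_ops(ops_map);
                if (!link) {
                        /* e.g. missing cong_avoid()/cong_control() shows up here */
                        return;
                }
                bpf_link__destroy(link);
        }
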
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index ec823561b912..ef6528b8084c 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -8,7 +8,6 @@
#include <linux/filter.h>
#include <linux/unistd.h>
#include <bpf/bpf.h>
-#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <string.h>
@@ -35,7 +34,6 @@ static bool always_log;
#undef CHECK
#define CHECK(condition, format...) _CHECK(condition, "check", duration, format)
-#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
#define NAME_NTH(N) (0xfffe0000 | N)
@@ -2898,26 +2896,6 @@ static struct btf_raw_test raw_tests[] = {
},
{
- .descr = "invalid enum kind_flag",
- .raw_types = {
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 1, 1), 4), /* [2] */
- BTF_ENUM_ENC(NAME_TBD, 0),
- BTF_END_RAW,
- },
- BTF_STR_SEC("\0A"),
- .map_type = BPF_MAP_TYPE_ARRAY,
- .map_name = "enum_type_check_btf",
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .key_type_id = 1,
- .value_type_id = 1,
- .max_entries = 4,
- .btf_load_err = true,
- .err_str = "Invalid btf_info kind_flag",
-},
-
-{
.descr = "valid fwd kind_flag",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -3974,6 +3952,141 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 1,
.max_entries = 1,
},
+{
+ .descr = "type_tag test #2, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_CONST_ENC(3), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #3, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #4, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #5, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(1), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "type_tag test #6, type tag order",
+ .raw_types = {
+ BTF_PTR_ENC(2), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */
+ BTF_PTR_ENC(6), /* [5] */
+ BTF_CONST_ENC(2), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "enum64 test #1, unsigned, size 8",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [2] */
+ BTF_ENUM64_ENC(NAME_TBD, 0, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a\0b\0c"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 8,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+},
+{
+ .descr = "enum64 test #2, signed, size 4",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 1, 2), 4), /* [2] */
+ BTF_ENUM64_ENC(NAME_TBD, -1, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0a\0b\0c"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 2,
+ .max_entries = 1,
+},
}; /* struct btf_raw_test raw_tests[] */
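The ENUM64 records above store each 64-bit enumerator as two 32-bit halves, BTF_ENUM64_ENC(name, lo32, hi32). As a hedged sketch of how such a value is put back together (this mirrors the btf_enum64_value() accessor that libbpf's btf.h gained along with ENUM64 support):

	/* recover the full 64-bit enumerator value from its two halves */
	static inline __u64 enum64_value(const struct btf_enum64 *e)
	{
		return ((__u64)e->val_hi32 << 32) | e->val_lo32;
	}

So BTF_ENUM64_ENC(NAME_TBD, 1, 1) in "enum64 test #1" encodes the value 0x100000001.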
@@ -4538,7 +4651,6 @@ struct btf_file_test {
};
static struct btf_file_test file_tests[] = {
- { .file = "test_btf_haskv.o", },
{ .file = "test_btf_newkv.o", },
{ .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
};
@@ -5226,7 +5338,7 @@ static void do_test_pprint(int test_num)
ret = snprintf(pin_path, sizeof(pin_path), "%s/%s",
"/sys/fs/bpf", test->map_name);
- if (CHECK(ret == sizeof(pin_path), "pin_path %s/%s is too long",
+ if (CHECK(ret >= sizeof(pin_path), "pin_path %s/%s is too long",
"/sys/fs/bpf", test->map_name)) {
err = -1;
goto done;
@@ -6902,9 +7014,12 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
+ BTF_ENUM64_ENC(NAME_TBD, 0, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 1),
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
},
.expect = {
.raw_types = {
@@ -6932,9 +7047,12 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 2), 8), /* [19] enum64 */
+ BTF_ENUM64_ENC(NAME_TBD, 0, 0),
+ BTF_ENUM64_ENC(NAME_TBD, 1, 1),
BTF_END_RAW,
},
- BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
+ BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R\0S\0T\0U"),
},
},
{
@@ -7395,6 +7513,91 @@ static struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0tag1\0t\0m"),
},
},
+{
+ .descr = "dedup: enum64, standalone",
+ .input = {
+ .raw_types = {
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+ .expect = {
+ .raw_types = {
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+},
+{
+ .descr = "dedup: enum64, fwd resolution",
+ .input = {
+ .raw_types = {
+ /* [1] fwd enum64 'e1' before full enum */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ /* [2] full enum64 'e1' after fwd */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ /* [3] full enum64 'e2' before fwd */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
+ /* [4] fwd enum64 'e2' after full enum */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ /* [5] incompatible full enum64 with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] full enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 123),
+ /* [2] full enum64 'e2' */
+ BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(4), 0, 456),
+ /* [3] incompatible full enum64 with different value */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 8),
+ BTF_ENUM64_ENC(NAME_NTH(2), 0, 321),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+ },
+},
+{
+ .descr = "dedup: enum and enum64, no dedup",
+ .input = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+ .expect = {
+ .raw_types = {
+ /* [1] enum 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+ BTF_ENUM_ENC(NAME_NTH(2), 1),
+ /* [2] enum64 'e1' */
+ BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 1), 4),
+ BTF_ENUM64_ENC(NAME_NTH(2), 1, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0e1\0e1_val"),
+ },
+},
};
@@ -7419,6 +7622,8 @@ static int btf_type_size(const struct btf_type *t)
return base_size + sizeof(__u32);
case BTF_KIND_ENUM:
return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ENUM64:
+ return base_size + vlen * sizeof(struct btf_enum64);
case BTF_KIND_ARRAY:
return base_size + sizeof(struct btf_array);
case BTF_KIND_STRUCT:
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c
index addf99c05896..6e36de1302fc 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_write.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c
@@ -9,6 +9,7 @@ static void gen_btf(struct btf *btf)
const struct btf_var_secinfo *vi;
const struct btf_type *t;
const struct btf_member *m;
+ const struct btf_enum64 *v64;
const struct btf_enum *v;
const struct btf_param *p;
int id, err, str_off;
@@ -171,7 +172,7 @@ static void gen_btf(struct btf *btf)
ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v2", "v2_name");
ASSERT_EQ(v->val, 2, "v2_val");
ASSERT_STREQ(btf_type_raw_dump(btf, 9),
- "[9] ENUM 'e1' size=4 vlen=2\n"
+ "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2", "raw_dump");
@@ -202,7 +203,7 @@ static void gen_btf(struct btf *btf)
ASSERT_EQ(btf_vlen(t), 0, "enum_fwd_kind");
ASSERT_EQ(t->size, 4, "enum_fwd_sz");
ASSERT_STREQ(btf_type_raw_dump(btf, 12),
- "[12] ENUM 'enum_fwd' size=4 vlen=0", "raw_dump");
+ "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0", "raw_dump");
/* TYPEDEF */
id = btf__add_typedef(btf, "typedef1", 1);
@@ -307,6 +308,48 @@ static void gen_btf(struct btf *btf)
ASSERT_EQ(t->type, 1, "tag_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 20),
"[20] TYPE_TAG 'tag1' type_id=1", "raw_dump");
+
+ /* ENUM64 */
+ id = btf__add_enum64(btf, "e1", 8, true);
+ ASSERT_EQ(id, 21, "enum64_id");
+ err = btf__add_enum64_value(btf, "v1", -1);
+ ASSERT_OK(err, "v1_res");
+ err = btf__add_enum64_value(btf, "v2", 0x123456789); /* 4886718345 */
+ ASSERT_OK(err, "v2_res");
+ t = btf__type_by_id(btf, 21);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
+ ASSERT_EQ(btf_vlen(t), 2, "enum64_vlen");
+ ASSERT_EQ(t->size, 8, "enum64_sz");
+ v64 = btf_enum64(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
+ ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
+ ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
+ v64 = btf_enum64(t) + 1;
+ ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v2", "v2_name");
+ ASSERT_EQ(v64->val_hi32, 0x1, "v2_val");
+ ASSERT_EQ(v64->val_lo32, 0x23456789, "v2_val");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 21),
+ "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345", "raw_dump");
+
+ id = btf__add_enum64(btf, "e1", 8, false);
+ ASSERT_EQ(id, 22, "enum64_id");
+ err = btf__add_enum64_value(btf, "v1", 0xffffffffFFFFFFFF); /* 18446744073709551615 */
+ ASSERT_OK(err, "v1_res");
+ t = btf__type_by_id(btf, 22);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum64_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM64, "enum64_kind");
+ ASSERT_EQ(btf_vlen(t), 1, "enum64_vlen");
+ ASSERT_EQ(t->size, 8, "enum64_sz");
+ v64 = btf_enum64(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, v64->name_off), "v1", "v1_name");
+ ASSERT_EQ(v64->val_hi32, 0xffffffff, "v1_val");
+ ASSERT_EQ(v64->val_lo32, 0xffffffff, "v1_val");
+ ASSERT_STREQ(btf_type_raw_dump(btf, 22),
+ "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615", "raw_dump");
}
static void test_btf_add()
@@ -332,12 +375,12 @@ static void test_btf_add()
"\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
"[8] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
- "[9] ENUM 'e1' size=4 vlen=2\n"
+ "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
"[10] FWD 'struct_fwd' fwd_kind=struct",
"[11] FWD 'union_fwd' fwd_kind=union",
- "[12] ENUM 'enum_fwd' size=4 vlen=0",
+ "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
"[13] TYPEDEF 'typedef1' type_id=1",
"[14] FUNC 'func1' type_id=15 linkage=global",
"[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
@@ -348,7 +391,12 @@ static void test_btf_add()
"\ttype_id=1 offset=4 size=8",
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
- "[20] TYPE_TAG 'tag1' type_id=1");
+ "[20] TYPE_TAG 'tag1' type_id=1",
+ "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345",
+ "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615");
btf__free(btf);
}
@@ -370,7 +418,7 @@ static void test_btf_add_btf()
gen_btf(btf2);
id = btf__add_btf(btf1, btf2);
- if (!ASSERT_EQ(id, 21, "id"))
+ if (!ASSERT_EQ(id, 23, "id"))
goto cleanup;
VALIDATE_RAW_BTF(
@@ -386,12 +434,12 @@ static void test_btf_add_btf()
"\t'f2' type_id=1 bits_offset=32 bitfield_size=16",
"[8] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=1 bits_offset=0 bitfield_size=16",
- "[9] ENUM 'e1' size=4 vlen=2\n"
+ "[9] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
"[10] FWD 'struct_fwd' fwd_kind=struct",
"[11] FWD 'union_fwd' fwd_kind=union",
- "[12] ENUM 'enum_fwd' size=4 vlen=0",
+ "[12] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
"[13] TYPEDEF 'typedef1' type_id=1",
"[14] FUNC 'func1' type_id=15 linkage=global",
"[15] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n"
@@ -403,36 +451,46 @@ static void test_btf_add_btf()
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
"[20] TYPE_TAG 'tag1' type_id=1",
+ "[21] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345",
+ "[22] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615",
/* types appended from the second BTF */
- "[21] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
- "[22] PTR '(anon)' type_id=21",
- "[23] CONST '(anon)' type_id=25",
- "[24] VOLATILE '(anon)' type_id=23",
- "[25] RESTRICT '(anon)' type_id=24",
- "[26] ARRAY '(anon)' type_id=22 index_type_id=21 nr_elems=10",
- "[27] STRUCT 's1' size=8 vlen=2\n"
- "\t'f1' type_id=21 bits_offset=0\n"
- "\t'f2' type_id=21 bits_offset=32 bitfield_size=16",
- "[28] UNION 'u1' size=8 vlen=1\n"
- "\t'f1' type_id=21 bits_offset=0 bitfield_size=16",
- "[29] ENUM 'e1' size=4 vlen=2\n"
+ "[23] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
+ "[24] PTR '(anon)' type_id=23",
+ "[25] CONST '(anon)' type_id=27",
+ "[26] VOLATILE '(anon)' type_id=25",
+ "[27] RESTRICT '(anon)' type_id=26",
+ "[28] ARRAY '(anon)' type_id=24 index_type_id=23 nr_elems=10",
+ "[29] STRUCT 's1' size=8 vlen=2\n"
+ "\t'f1' type_id=23 bits_offset=0\n"
+ "\t'f2' type_id=23 bits_offset=32 bitfield_size=16",
+ "[30] UNION 'u1' size=8 vlen=1\n"
+ "\t'f1' type_id=23 bits_offset=0 bitfield_size=16",
+ "[31] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
- "[30] FWD 'struct_fwd' fwd_kind=struct",
- "[31] FWD 'union_fwd' fwd_kind=union",
- "[32] ENUM 'enum_fwd' size=4 vlen=0",
- "[33] TYPEDEF 'typedef1' type_id=21",
- "[34] FUNC 'func1' type_id=35 linkage=global",
- "[35] FUNC_PROTO '(anon)' ret_type_id=21 vlen=2\n"
- "\t'p1' type_id=21\n"
- "\t'p2' type_id=22",
- "[36] VAR 'var1' type_id=21, linkage=global-alloc",
- "[37] DATASEC 'datasec1' size=12 vlen=1\n"
- "\ttype_id=21 offset=4 size=8",
- "[38] DECL_TAG 'tag1' type_id=36 component_idx=-1",
- "[39] DECL_TAG 'tag2' type_id=34 component_idx=1",
- "[40] TYPE_TAG 'tag1' type_id=21");
+ "[32] FWD 'struct_fwd' fwd_kind=struct",
+ "[33] FWD 'union_fwd' fwd_kind=union",
+ "[34] ENUM 'enum_fwd' encoding=UNSIGNED size=4 vlen=0",
+ "[35] TYPEDEF 'typedef1' type_id=23",
+ "[36] FUNC 'func1' type_id=37 linkage=global",
+ "[37] FUNC_PROTO '(anon)' ret_type_id=23 vlen=2\n"
+ "\t'p1' type_id=23\n"
+ "\t'p2' type_id=24",
+ "[38] VAR 'var1' type_id=23, linkage=global-alloc",
+ "[39] DATASEC 'datasec1' size=12 vlen=1\n"
+ "\ttype_id=23 offset=4 size=8",
+ "[40] DECL_TAG 'tag1' type_id=38 component_idx=-1",
+ "[41] DECL_TAG 'tag2' type_id=36 component_idx=1",
+ "[42] TYPE_TAG 'tag1' type_id=23",
+ "[43] ENUM64 'e1' encoding=SIGNED size=8 vlen=2\n"
+ "\t'v1' val=-1\n"
+ "\t'v2' val=4886718345",
+ "[44] ENUM64 'e1' encoding=UNSIGNED size=8 vlen=1\n"
+ "\t'v1' val=18446744073709551615");
cleanup:
btf__free(btf1);
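The renumbering in the expected dump above follows directly from how btf__add_btf() appends one BTF object to another: copied types keep their relative order, but every ID is shifted by the number of types already in the destination (type IDs are 1-based; 0 remains void). A sketch of that mapping, where remap_id() is a hypothetical helper name:

	/* hypothetical: ID a source type ends up with after the append */
	static int remap_id(int src_id, int dst_nr_types)
	{
		return src_id ? src_id + dst_nr_types : 0;
	}

With the two new ENUM64 types, btf1 holds 22 types before the append, so btf2's [1] INT becomes [23] and all intra-type references shift accordingly.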
diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
index 1dfe14ff6aa4..f2ce4fd1cdae 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
@@ -167,7 +167,7 @@ void test_core_autosize(void)
if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
goto cleanup;
- err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
+ err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0);
if (!ASSERT_OK(err, "bss_lookup"))
goto cleanup;
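The change from bpf_map_lookup_elem(bpf_map__fd(...), ...) to bpf_map__lookup_elem() recurs throughout this series: the bpf_map__*_elem() variants take explicit key and value sizes and validate them against the map definition rather than trusting the caller. Typical usage, assuming a map with 4-byte keys and 8-byte values:

	__u32 key = 0;
	__u64 value = 0;

	/* fails with -EINVAL if either size doesn't match the map definition */
	err = bpf_map__lookup_elem(map, &key, sizeof(key),
				   &value, sizeof(value), 0);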
diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c
index 1931a158510e..63a51e9f3630 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_extern.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c
@@ -39,6 +39,7 @@ static struct test_case {
"CONFIG_STR=\"abracad\"\n"
"CONFIG_MISSING=0",
.data = {
+ .unkn_virt_val = 0,
.bpf_syscall = false,
.tristate_val = TRI_MODULE,
.bool_val = true,
@@ -121,7 +122,7 @@ static struct test_case {
void test_core_extern(void)
{
const uint32_t kern_ver = get_kernel_version();
- int err, duration = 0, i, j;
+ int err, i, j;
struct test_core_extern *skel = NULL;
uint64_t *got, *exp;
int n = sizeof(*skel->data) / sizeof(uint64_t);
@@ -136,19 +137,17 @@ void test_core_extern(void)
continue;
skel = test_core_extern__open_opts(&opts);
- if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
goto cleanup;
err = test_core_extern__load(skel);
if (t->fails) {
- CHECK(!err, "skel_load",
- "shouldn't succeed open/load of skeleton\n");
+ ASSERT_ERR(err, "skel_load_should_fail");
goto cleanup;
- } else if (CHECK(err, "skel_load",
- "failed to open/load skeleton\n")) {
+ } else if (!ASSERT_OK(err, "skel_load")) {
goto cleanup;
}
err = test_core_extern__attach(skel);
- if (CHECK(err, "attach_raw_tp", "failed attach: %d\n", err))
+ if (!ASSERT_OK(err, "attach_raw_tp"))
goto cleanup;
usleep(1);
@@ -158,9 +157,7 @@ void test_core_extern(void)
got = (uint64_t *)skel->data;
exp = (uint64_t *)&t->data;
for (j = 0; j < n; j++) {
- CHECK(got[j] != exp[j], "check_res",
- "result #%d: expected %llx, but got %llx\n",
- j, (__u64)exp[j], (__u64)got[j]);
+ ASSERT_EQ(got[j], exp[j], "result");
}
cleanup:
test_core_extern__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index f28f75aa9154..c8655ba9a88f 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -84,6 +84,7 @@ static int duration = 0;
#define NESTING_ERR_CASE(name) { \
NESTING_CASE_COMMON(name), \
.fails = true, \
+ .run_btfgen_fails = true, \
}
#define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
@@ -258,12 +259,14 @@ static int duration = 0;
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
"probed:", name), \
.fails = true, \
+ .run_btfgen_fails = true, \
.raw_tp_name = "sys_enter", \
.prog_name = "test_core_bitfields", \
}, { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
"direct:", name), \
.fails = true, \
+ .run_btfgen_fails = true, \
.prog_name = "test_core_bitfields_direct", \
}
@@ -277,13 +280,21 @@ static int duration = 0;
#define SIZE_OUTPUT_DATA(type) \
STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
.int_sz = sizeof(((type *)0)->int_field), \
+ .int_off = offsetof(type, int_field), \
.struct_sz = sizeof(((type *)0)->struct_field), \
+ .struct_off = offsetof(type, struct_field), \
.union_sz = sizeof(((type *)0)->union_field), \
+ .union_off = offsetof(type, union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
- .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
+ .arr_off = offsetof(type, arr_field), \
+ .arr_elem_sz = sizeof(((type *)0)->arr_field[1]), \
+ .arr_elem_off = offsetof(type, arr_field[1]), \
.ptr_sz = 8, /* always 8-byte pointer for BPF */ \
+ .ptr_off = offsetof(type, ptr_field), \
.enum_sz = sizeof(((type *)0)->enum_field), \
+ .enum_off = offsetof(type, enum_field), \
.float_sz = sizeof(((type *)0)->float_field), \
+ .float_off = offsetof(type, float_field), \
}
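The new *_off fields record what the BPF side is expected to report via CO-RE byte-offset relocations. A hedged sketch of the BPF-side counterpart, using the bpf_core_field_offset() macro from libbpf's bpf_core_read.h (struct and field names follow the test's conventions; the surrounding program is omitted):

	/* relocatable byte offsets against the target kernel's layout */
	out->int_off = bpf_core_field_offset(struct core_reloc_size, int_field);
	out->struct_off = bpf_core_field_offset(struct core_reloc_size, struct_field);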
#define SIZE_CASE(name) { \
@@ -296,6 +307,7 @@ static int duration = 0;
#define SIZE_ERR_CASE(name) { \
SIZE_CASE_COMMON(name), \
.fails = true, \
+ .run_btfgen_fails = true, \
}
#define TYPE_BASED_CASE_COMMON(name) \
@@ -355,6 +367,25 @@ static int duration = 0;
.fails = true, \
}
+#define ENUM64VAL_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_enum64val.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o", \
+ .raw_tp_name = "sys_enter", \
+ .prog_name = "test_core_enum64val"
+
+#define ENUM64VAL_CASE(name, ...) { \
+ ENUM64VAL_CASE_COMMON(name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_enum64val_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_enum64val_output), \
+}
+
+#define ENUM64VAL_ERR_CASE(name) { \
+ ENUM64VAL_CASE_COMMON(name), \
+ .fails = true, \
+}
+
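On the BPF side, the enum64 cases exercise the enumerator existence and value relocations exposed by bpf_core_read.h. A hedged sketch (the enum and enumerator names are illustrative, not the test's exact ones):

	enum named_enum64 { UNSIGNED_VAL1 = 0x1ffffffffULL };

	__u64 v = 0;

	if (bpf_core_enum_value_exists(enum named_enum64, UNSIGNED_VAL1))
		v = bpf_core_enum_value(enum named_enum64, UNSIGNED_VAL1);

If the enumerator is missing in the target BTF, the existence check relocates to 0 and the value is never read.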
struct core_reloc_test_case;
typedef int (*setup_test_fn)(struct core_reloc_test_case *test);
@@ -369,6 +400,7 @@ struct core_reloc_test_case {
const char *output;
int output_len;
bool fails;
+ bool run_btfgen_fails;
bool needs_testmod;
bool relaxed_core_relocs;
const char *prog_name;
@@ -511,7 +543,6 @@ static int __trigger_module_test_read(const struct core_reloc_test_case *test)
return 0;
}
-
static const struct core_reloc_test_case test_cases[] = {
/* validate we can find kernel image and use its BTF for relocs */
{
@@ -524,6 +555,7 @@ static const struct core_reloc_test_case test_cases[] = {
.valid = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
.comm = "test_progs",
.comm_len = sizeof("test_progs"),
+ .local_task_struct_matches = true,
},
.output_len = sizeof(struct core_reloc_kernel_output),
.raw_tp_name = "sys_enter",
@@ -714,14 +746,16 @@ static const struct core_reloc_test_case test_cases[] = {
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
- /* size relocation checks */
+ /* field size and offset relocation checks */
SIZE_CASE(size),
SIZE_CASE(size___diff_sz),
+ SIZE_CASE(size___diff_offs),
SIZE_ERR_CASE(size___err_ambiguous),
- /* validate type existence and size relocations */
+ /* validate type existence, match, and size relocations */
TYPE_BASED_CASE(type_based, {
.struct_exists = 1,
+ .complex_struct_exists = 1,
.union_exists = 1,
.enum_exists = 1,
.typedef_named_struct_exists = 1,
@@ -730,8 +764,24 @@ static const struct core_reloc_test_case test_cases[] = {
.typedef_int_exists = 1,
.typedef_enum_exists = 1,
.typedef_void_ptr_exists = 1,
+ .typedef_restrict_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
+
+ .struct_matches = 1,
+ .complex_struct_matches = 1,
+ .union_matches = 1,
+ .enum_matches = 1,
+ .typedef_named_struct_matches = 1,
+ .typedef_anon_struct_matches = 1,
+ .typedef_struct_ptr_matches = 1,
+ .typedef_int_matches = 1,
+ .typedef_enum_matches = 1,
+ .typedef_void_ptr_matches = 1,
+ .typedef_restrict_ptr_matches = 1,
+ .typedef_func_proto_matches = 1,
+ .typedef_arr_matches = 1,
+
.struct_sz = sizeof(struct a_struct),
.union_sz = sizeof(union a_union),
.enum_sz = sizeof(enum an_enum),
@@ -747,6 +797,45 @@ static const struct core_reloc_test_case test_cases[] = {
TYPE_BASED_CASE(type_based___all_missing, {
/* all zeros */
}),
+ TYPE_BASED_CASE(type_based___diff, {
+ .struct_exists = 1,
+ .complex_struct_exists = 1,
+ .union_exists = 1,
+ .enum_exists = 1,
+ .typedef_named_struct_exists = 1,
+ .typedef_anon_struct_exists = 1,
+ .typedef_struct_ptr_exists = 1,
+ .typedef_int_exists = 1,
+ .typedef_enum_exists = 1,
+ .typedef_void_ptr_exists = 1,
+ .typedef_func_proto_exists = 1,
+ .typedef_arr_exists = 1,
+
+ .struct_matches = 1,
+ .complex_struct_matches = 1,
+ .union_matches = 1,
+ .enum_matches = 1,
+ .typedef_named_struct_matches = 1,
+ .typedef_anon_struct_matches = 1,
+ .typedef_struct_ptr_matches = 1,
+ .typedef_int_matches = 0,
+ .typedef_enum_matches = 1,
+ .typedef_void_ptr_matches = 1,
+ .typedef_func_proto_matches = 0,
+ .typedef_arr_matches = 0,
+
+ .struct_sz = sizeof(struct a_struct___diff),
+ .union_sz = sizeof(union a_union___diff),
+ .enum_sz = sizeof(enum an_enum___diff),
+ .typedef_named_struct_sz = sizeof(named_struct_typedef___diff),
+ .typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff),
+ .typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff),
+ .typedef_int_sz = sizeof(int_typedef___diff),
+ .typedef_enum_sz = sizeof(enum_typedef___diff),
+ .typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff),
+ .typedef_func_proto_sz = sizeof(func_proto_typedef___diff),
+ .typedef_arr_sz = sizeof(arr_typedef___diff),
+ }),
TYPE_BASED_CASE(type_based___diff_sz, {
.struct_exists = 1,
.union_exists = 1,
@@ -759,6 +848,19 @@ static const struct core_reloc_test_case test_cases[] = {
.typedef_void_ptr_exists = 1,
.typedef_func_proto_exists = 1,
.typedef_arr_exists = 1,
+
+ .struct_matches = 0,
+ .union_matches = 0,
+ .enum_matches = 0,
+ .typedef_named_struct_matches = 0,
+ .typedef_anon_struct_matches = 0,
+ .typedef_struct_ptr_matches = 1,
+ .typedef_int_matches = 0,
+ .typedef_enum_matches = 0,
+ .typedef_void_ptr_matches = 1,
+ .typedef_func_proto_matches = 0,
+ .typedef_arr_matches = 0,
+
.struct_sz = sizeof(struct a_struct___diff_sz),
.union_sz = sizeof(union a_union___diff_sz),
.enum_sz = sizeof(enum an_enum___diff_sz),
@@ -773,10 +875,12 @@ static const struct core_reloc_test_case test_cases[] = {
}),
TYPE_BASED_CASE(type_based___incompat, {
.enum_exists = 1,
+ .enum_matches = 1,
.enum_sz = sizeof(enum an_enum),
}),
TYPE_BASED_CASE(type_based___fn_wrong_args, {
.struct_exists = 1,
+ .struct_matches = 1,
.struct_sz = sizeof(struct a_struct),
}),
@@ -822,6 +926,45 @@ static const struct core_reloc_test_case test_cases[] = {
.anon_val2 = 0x222,
}),
ENUMVAL_ERR_CASE(enumval___err_missing),
+
+ /* 64bit enumerator value existence and value relocations */
+ ENUM64VAL_CASE(enum64val, {
+ .unsigned_val1_exists = true,
+ .unsigned_val2_exists = true,
+ .unsigned_val3_exists = true,
+ .signed_val1_exists = true,
+ .signed_val2_exists = true,
+ .signed_val3_exists = true,
+ .unsigned_val1 = 0x1ffffffffULL,
+ .unsigned_val2 = 0x2,
+ .signed_val1 = 0x1ffffffffLL,
+ .signed_val2 = -2,
+ }),
+ ENUM64VAL_CASE(enum64val___diff, {
+ .unsigned_val1_exists = true,
+ .unsigned_val2_exists = true,
+ .unsigned_val3_exists = true,
+ .signed_val1_exists = true,
+ .signed_val2_exists = true,
+ .signed_val3_exists = true,
+ .unsigned_val1 = 0x101ffffffffULL,
+ .unsigned_val2 = 0x202ffffffffULL,
+ .signed_val1 = -101,
+ .signed_val2 = -202,
+ }),
+ ENUM64VAL_CASE(enum64val___val3_missing, {
+ .unsigned_val1_exists = true,
+ .unsigned_val2_exists = true,
+ .unsigned_val3_exists = false,
+ .signed_val1_exists = true,
+ .signed_val2_exists = true,
+ .signed_val3_exists = false,
+ .unsigned_val1 = 0x111ffffffffULL,
+ .unsigned_val2 = 0x222,
+ .signed_val1 = 0x111ffffffffLL,
+ .signed_val2 = -222,
+ }),
+ ENUM64VAL_ERR_CASE(enum64val___err_missing),
};
struct data {
@@ -885,7 +1028,7 @@ static void run_core_reloc_tests(bool use_btfgen)
/* generate a "minimal" BTF file and use it as source */
if (use_btfgen) {
- if (!test_case->btf_src_file || test_case->fails) {
+ if (!test_case->btf_src_file || test_case->run_btfgen_fails) {
test__skip();
continue;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c
index 6acb0e94d4d7..4a2c256c8db6 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_retro.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c
@@ -6,31 +6,32 @@
void test_core_retro(void)
{
- int err, zero = 0, res, duration = 0, my_pid = getpid();
+ int err, zero = 0, res, my_pid = getpid();
struct test_core_retro *skel;
/* load program */
skel = test_core_retro__open_and_load();
- if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
+ if (!ASSERT_OK_PTR(skel, "skel_load"))
goto out_close;
- err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0);
- if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno))
+ err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero),
+ &my_pid, sizeof(my_pid), 0);
+ if (!ASSERT_OK(err, "map_update"))
goto out_close;
/* attach probe */
err = test_core_retro__attach(skel);
- if (CHECK(err, "attach_kprobe", "err %d\n", err))
+ if (!ASSERT_OK(err, "attach_kprobe"))
goto out_close;
/* trigger */
usleep(1);
- err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
- if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
+ err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0);
+ if (!ASSERT_OK(err, "map_lookup"))
goto out_close;
- CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid);
+ ASSERT_EQ(res, my_pid, "pid_check");
out_close:
test_core_retro__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
new file mode 100644
index 000000000000..3c7aa82b98e2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <test_progs.h>
+#include "dynptr_fail.skel.h"
+#include "dynptr_success.skel.h"
+
+static char obj_log_buf[1048576]; /* 1 MB */
+static const size_t log_buf_sz = sizeof(obj_log_buf);
+
+static struct {
+ const char *prog_name;
+ const char *expected_err_msg;
+} dynptr_tests[] = {
+ /* failure cases */
+ {"ringbuf_missing_release1", "Unreleased reference id=1"},
+ {"ringbuf_missing_release2", "Unreleased reference id=2"},
+ {"ringbuf_missing_release_callback", "Unreleased reference id"},
+ {"use_after_invalid", "Expected an initialized dynptr as arg #3"},
+ {"ringbuf_invalid_api", "type=mem expected=alloc_mem"},
+ {"add_dynptr_to_map1", "invalid indirect read from stack"},
+ {"add_dynptr_to_map2", "invalid indirect read from stack"},
+ {"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"},
+ {"data_slice_out_of_bounds_map_value", "value is outside of the allowed memory range"},
+ {"data_slice_use_after_release", "invalid mem access 'scalar'"},
+ {"data_slice_missing_null_check1", "invalid mem access 'mem_or_null'"},
+ {"data_slice_missing_null_check2", "invalid mem access 'mem_or_null'"},
+ {"invalid_helper1", "invalid indirect read from stack"},
+ {"invalid_helper2", "Expected an initialized dynptr as arg #3"},
+ {"invalid_write1", "Expected an initialized dynptr as arg #1"},
+ {"invalid_write2", "Expected an initialized dynptr as arg #3"},
+ {"invalid_write3", "Expected an initialized ringbuf dynptr as arg #1"},
+ {"invalid_write4", "arg 1 is an unacquired reference"},
+ {"invalid_read1", "invalid read from stack"},
+ {"invalid_read2", "cannot pass in dynptr at an offset"},
+ {"invalid_read3", "invalid read from stack"},
+ {"invalid_read4", "invalid read from stack"},
+ {"invalid_offset", "invalid write to stack"},
+ {"global", "type=map_value expected=fp"},
+ {"release_twice", "arg 1 is an unacquired reference"},
+ {"release_twice_callback", "arg 1 is an unacquired reference"},
+ {"dynptr_from_mem_invalid_api",
+ "Unsupported reg type fp for bpf_dynptr_from_mem data"},
+
+ /* success cases */
+ {"test_read_write", NULL},
+ {"test_data_slice", NULL},
+ {"test_ringbuf", NULL},
+};
+
+static void verify_fail(const char *prog_name, const char *expected_err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct bpf_program *prog;
+ struct dynptr_fail *skel;
+ int err;
+
+ opts.kernel_log_buf = obj_log_buf;
+ opts.kernel_log_size = log_buf_sz;
+ opts.kernel_log_level = 1;
+
+ skel = dynptr_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "dynptr_fail__open_opts"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ bpf_program__set_autoload(prog, true);
+
+ bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
+
+ err = dynptr_fail__load(skel);
+ if (!ASSERT_ERR(err, "unexpected load success"))
+ goto cleanup;
+
+ if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
+ fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
+ fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+ }
+
+cleanup:
+ dynptr_fail__destroy(skel);
+}
+
+static void verify_success(const char *prog_name)
+{
+ struct dynptr_success *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err;
+
+ skel = dynptr_success__open();
+ if (!ASSERT_OK_PTR(skel, "dynptr_success__open"))
+ return;
+
+ skel->bss->pid = getpid();
+
+ bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize());
+
+ err = dynptr_success__load(skel);
+ if (!ASSERT_OK(err, "dynptr_success__load"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
+ goto cleanup;
+
+ usleep(1);
+
+ ASSERT_EQ(skel->bss->err, 0, "err");
+
+ bpf_link__destroy(link);
+
+cleanup:
+ dynptr_success__destroy(skel);
+}
+
+void test_dynptr(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dynptr_tests); i++) {
+ if (!test__start_subtest(dynptr_tests[i].prog_name))
+ continue;
+
+ if (dynptr_tests[i].expected_err_msg)
+ verify_fail(dynptr_tests[i].prog_name,
+ dynptr_tests[i].expected_err_msg);
+ else
+ verify_success(dynptr_tests[i].prog_name);
+ }
+}
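verify_fail() above depends on the kernel_log_* open options routing the verifier log into a caller-supplied buffer, which is what makes matching expected_err_msg possible at all. The pattern in isolation (obj_log_buf follows the test; the initializer-style LIBBPF_OPTS is equivalent to the field assignments above):

	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.kernel_log_buf = obj_log_buf,
		.kernel_log_size = sizeof(obj_log_buf),
		.kernel_log_level = 1,
	);

	skel = dynptr_fail__open_opts(&opts);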
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index d9aad15e0d24..02bb8cbf9194 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -395,6 +395,18 @@ static void test_func_map_prog_compatibility(void)
"./test_attach_probe.o");
}
+static void test_func_replace_global_func(void)
+{
+ const char *prog_name[] = {
+ "freplace/test_pkt_access",
+ };
+
+ test_fexit_bpf2bpf_common("./freplace_global_func.o",
+ "./test_pkt_access.o",
+ ARRAY_SIZE(prog_name),
+ prog_name, false, NULL);
+}
+
/* NOTE: affect other tests, must run in serial mode */
void serial_test_fexit_bpf2bpf(void)
{
@@ -416,4 +428,6 @@ void serial_test_fexit_bpf2bpf(void)
test_func_replace_multi();
if (test__start_subtest("fmod_ret_freplace"))
test_fmod_ret_freplace();
+ if (test__start_subtest("func_replace_global_func"))
+ test_func_replace_global_func();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 3ee2107bbf7a..5a7e6011f6bf 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -5,13 +5,11 @@
/* that's kernel internal BPF_MAX_TRAMP_PROGS define */
#define CNT 38
-void test_fexit_stress(void)
+void serial_test_fexit_stress(void)
{
- char test_skb[128] = {};
int fexit_fd[CNT] = {};
int link_fd[CNT] = {};
- char error[4096];
- int err, i, filter_fd;
+ int err, i;
const struct bpf_insn trace_program[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -20,25 +18,9 @@ void test_fexit_stress(void)
LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
.expected_attach_type = BPF_TRACE_FEXIT,
- .log_buf = error,
- .log_size = sizeof(error),
);
- const struct bpf_insn skb_program[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
-
- LIBBPF_OPTS(bpf_prog_load_opts, skb_opts,
- .log_buf = error,
- .log_size = sizeof(error),
- );
-
- LIBBPF_OPTS(bpf_test_run_opts, topts,
- .data_in = test_skb,
- .data_size_in = sizeof(test_skb),
- .repeat = 1,
- );
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
trace_opts.expected_attach_type);
@@ -53,20 +35,14 @@ void test_fexit_stress(void)
&trace_opts);
if (!ASSERT_GE(fexit_fd[i], 0, "fexit load"))
goto out;
- link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]);
+ link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL);
if (!ASSERT_GE(link_fd[i], 0, "fexit attach"))
goto out;
}
- filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
- skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),
- &skb_opts);
- if (!ASSERT_GE(filter_fd, 0, "test_program_loaded"))
- goto out;
+ err = bpf_prog_test_run_opts(fexit_fd[0], &topts);
+ ASSERT_OK(err, "bpf_prog_test_run_opts");
- err = bpf_prog_test_run_opts(filter_fd, &topts);
- close(filter_fd);
- CHECK_FAIL(err);
out:
for (i = 0; i < CNT; i++) {
if (link_fd[i])
diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c
index 044df13ee069..8963f8a549f2 100644
--- a/tools/testing/selftests/bpf/prog_tests/for_each.c
+++ b/tools/testing/selftests/bpf/prog_tests/for_each.c
@@ -4,14 +4,16 @@
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
+#include "for_each_map_elem_write_key.skel.h"
static unsigned int duration;
static void test_hash_map(void)
{
- int i, err, hashmap_fd, max_entries, percpu_map_fd;
+ int i, err, max_entries;
struct for_each_hash_map_elem *skel;
__u64 *percpu_valbuf = NULL;
+ size_t percpu_val_sz;
__u32 key, num_cpus;
__u64 val;
LIBBPF_OPTS(bpf_test_run_opts, topts,
@@ -24,26 +26,27 @@ static void test_hash_map(void)
if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
return;
- hashmap_fd = bpf_map__fd(skel->maps.hashmap);
max_entries = bpf_map__max_entries(skel->maps.hashmap);
for (i = 0; i < max_entries; i++) {
key = i;
val = i + 1;
- err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
+ err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
num_cpus = bpf_num_possible_cpus();
- percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
- percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+ percpu_val_sz = sizeof(__u64) * num_cpus;
+ percpu_valbuf = malloc(percpu_val_sz);
if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
goto out;
key = 1;
for (i = 0; i < num_cpus; i++)
percpu_valbuf[i] = i + 1;
- err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+ err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+ percpu_valbuf, percpu_val_sz, BPF_ANY);
if (!ASSERT_OK(err, "percpu_map_update"))
goto out;
@@ -57,7 +60,7 @@ static void test_hash_map(void)
ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
key = 1;
- err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
+ err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
ASSERT_ERR(err, "hashmap_lookup");
ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
@@ -74,9 +77,10 @@ out:
static void test_array_map(void)
{
__u32 key, num_cpus, max_entries;
- int i, arraymap_fd, percpu_map_fd, err;
+ int i, err;
struct for_each_array_map_elem *skel;
__u64 *percpu_valbuf = NULL;
+ size_t percpu_val_sz;
__u64 val, expected_total;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
@@ -88,7 +92,6 @@ static void test_array_map(void)
if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
return;
- arraymap_fd = bpf_map__fd(skel->maps.arraymap);
expected_total = 0;
max_entries = bpf_map__max_entries(skel->maps.arraymap);
for (i = 0; i < max_entries; i++) {
@@ -97,21 +100,23 @@ static void test_array_map(void)
/* skip the last iteration for expected total */
if (i != max_entries - 1)
expected_total += val;
- err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
+ err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
+ &val, sizeof(val), BPF_ANY);
if (!ASSERT_OK(err, "map_update"))
goto out;
}
num_cpus = bpf_num_possible_cpus();
- percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
- percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+ percpu_val_sz = sizeof(__u64) * num_cpus;
+ percpu_valbuf = malloc(percpu_val_sz);
if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
goto out;
key = 0;
for (i = 0; i < num_cpus; i++)
percpu_valbuf[i] = i + 1;
- err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+ err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+ percpu_valbuf, percpu_val_sz, BPF_ANY);
if (!ASSERT_OK(err, "percpu_map_update"))
goto out;
@@ -129,10 +134,21 @@ out:
for_each_array_map_elem__destroy(skel);
}
+static void test_write_map_key(void)
+{
+ struct for_each_map_elem_write_key *skel;
+
+ skel = for_each_map_elem_write_key__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
+ for_each_map_elem_write_key__destroy(skel);
+}
+
void test_for_each(void)
{
if (test__start_subtest("hash_map"))
test_hash_map();
if (test__start_subtest("array_map"))
test_array_map();
+ if (test__start_subtest("write_map_key"))
+ test_write_map_key();
}
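For the percpu_map updates above, user space supplies one value slot per possible CPU and a single bpf_map__update_elem() call writes all of them; the explicit percpu_val_sz is what satisfies the new API's value-size check. A sketch of the layout (libbpf_num_possible_cpus() here; the test uses the selftests' bpf_num_possible_cpus() wrapper):

	int num_cpus = libbpf_num_possible_cpus();
	__u64 *vals = calloc(num_cpus, sizeof(__u64)); /* vals[cpu] */

	err = bpf_map__update_elem(map, &key, sizeof(key),
				   vals, num_cpus * sizeof(__u64), BPF_ANY);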
diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
index e1de5f80c3b2..0354f9b82c65 100644
--- a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
+++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
@@ -6,11 +6,10 @@
void test_helper_restricted(void)
{
int prog_i = 0, prog_cnt;
- int duration = 0;
do {
struct test_helper_restricted *test;
- int maybeOK;
+ int err;
test = test_helper_restricted__open();
if (!ASSERT_OK_PTR(test, "open"))
@@ -21,12 +20,11 @@ void test_helper_restricted(void)
for (int j = 0; j < prog_cnt; ++j) {
struct bpf_program *prog = *test->skeleton->progs[j].prog;
- maybeOK = bpf_program__set_autoload(prog, prog_i == j);
- ASSERT_OK(maybeOK, "set autoload");
+ bpf_program__set_autoload(prog, true);
}
- maybeOK = test_helper_restricted__load(test);
- CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted");
+ err = test_helper_restricted__load(test);
+ ASSERT_ERR(err, "load_should_fail");
test_helper_restricted__destroy(test);
} while (++prog_i < prog_cnt);
diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
index b9876b55fc0c..d457a55ff408 100644
--- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
@@ -2,6 +2,9 @@
#include <test_progs.h>
#include "kprobe_multi.skel.h"
#include "trace_helpers.h"
+#include "kprobe_multi_empty.skel.h"
+#include "bpf/libbpf_internal.h"
+#include "bpf/hashmap.h"
static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return)
{
@@ -140,14 +143,14 @@ test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts)
goto cleanup;
skel->bss->pid = getpid();
- link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
pattern, opts);
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
if (opts) {
opts->retprobe = true;
- link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe,
+ link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual,
pattern, opts);
if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
@@ -232,7 +235,7 @@ static void test_attach_api_fails(void)
skel->bss->pid = getpid();
/* fail_1 - pattern and opts NULL */
- link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
NULL, NULL);
if (!ASSERT_ERR_PTR(link, "fail_1"))
goto cleanup;
@@ -246,7 +249,7 @@ static void test_attach_api_fails(void)
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
- link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
NULL, &opts);
if (!ASSERT_ERR_PTR(link, "fail_2"))
goto cleanup;
@@ -260,7 +263,7 @@ static void test_attach_api_fails(void)
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
- link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_3"))
goto cleanup;
@@ -274,7 +277,7 @@ static void test_attach_api_fails(void)
opts.cnt = ARRAY_SIZE(syms);
opts.cookies = NULL;
- link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_4"))
goto cleanup;
@@ -288,7 +291,7 @@ static void test_attach_api_fails(void)
opts.cnt = 0;
opts.cookies = cookies;
- link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe,
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual,
"ksys_*", &opts);
if (!ASSERT_ERR_PTR(link, "fail_5"))
goto cleanup;
@@ -301,6 +304,151 @@ cleanup:
kprobe_multi__destroy(skel);
}
+static inline __u64 get_time_ns(void)
+{
+ struct timespec t;
+
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ return (__u64) t.tv_sec * 1000000000 + t.tv_nsec;
+}
+
+static size_t symbol_hash(const void *key, void *ctx __maybe_unused)
+{
+ return str_hash((const char *) key);
+}
+
+static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_unused)
+{
+ return strcmp((const char *) key1, (const char *) key2) == 0;
+}
+
+static int get_syms(char ***symsp, size_t *cntp)
+{
+ size_t cap = 0, cnt = 0, i;
+ char *name, **syms = NULL;
+ struct hashmap *map;
+ char buf[256];
+ FILE *f;
+ int err = 0;
+
+ /*
+ * The available_filter_functions file contains many duplicate
+ * entries, but other than that all symbols are usable for the
+ * kprobe multi interface.
+ * Filter out the duplicates using hashmap__add, which won't
+ * add an already existing entry.
+ */
+ f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
+ if (!f)
+ return -EINVAL;
+
+ map = hashmap__new(symbol_hash, symbol_equal, NULL);
+ if (IS_ERR(map)) {
+ err = libbpf_get_error(map);
+ goto error;
+ }
+
+ while (fgets(buf, sizeof(buf), f)) {
+ /* skip modules */
+ if (strchr(buf, '['))
+ continue;
+ if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
+ continue;
+ /*
+ * We attach to almost all kernel functions and some of them
+ * will cause 'suspicious RCU usage' when fprobe is attached
+ * to them. Filter out the current culprits - arch_cpu_idle
+ * and rcu_* functions.
+ */
+ if (!strcmp(name, "arch_cpu_idle"))
+ continue;
+ if (!strncmp(name, "rcu_", 4))
+ continue;
+ if (!strcmp(name, "bpf_dispatcher_xdp_func"))
+ continue;
+ if (!strncmp(name, "__ftrace_invalid_address__",
+ sizeof("__ftrace_invalid_address__") - 1))
+ continue;
+ err = hashmap__add(map, name, NULL);
+ if (err) {
+ free(name);
+ if (err == -EEXIST)
+ continue;
+ goto error;
+ }
+ err = libbpf_ensure_mem((void **) &syms, &cap,
+ sizeof(*syms), cnt + 1);
+ if (err) {
+ free(name);
+ goto error;
+ }
+ syms[cnt] = name;
+ cnt++;
+ }
+
+ *symsp = syms;
+ *cntp = cnt;
+
+error:
+ fclose(f);
+ hashmap__free(map);
+ if (err) {
+ for (i = 0; i < cnt; i++)
+ free(syms[i]);
+ free(syms);
+ }
+ return err;
+}
+
+static void test_bench_attach(void)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ struct kprobe_multi_empty *skel = NULL;
+ long attach_start_ns, attach_end_ns;
+ long detach_start_ns, detach_end_ns;
+ double attach_delta, detach_delta;
+ struct bpf_link *link = NULL;
+ char **syms = NULL;
+ size_t cnt = 0, i;
+
+ if (!ASSERT_OK(get_syms(&syms, &cnt), "get_syms"))
+ return;
+
+ skel = kprobe_multi_empty__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
+ goto cleanup;
+
+ opts.syms = (const char **) syms;
+ opts.cnt = cnt;
+
+ attach_start_ns = get_time_ns();
+ link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty,
+ NULL, &opts);
+ attach_end_ns = get_time_ns();
+
+ if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+ goto cleanup;
+
+ detach_start_ns = get_time_ns();
+ bpf_link__destroy(link);
+ detach_end_ns = get_time_ns();
+
+ attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
+ detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
+
+ printf("%s: found %lu functions\n", __func__, cnt);
+ printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
+ printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
+
+cleanup:
+ kprobe_multi_empty__destroy(skel);
+ if (syms) {
+ for (i = 0; i < cnt; i++)
+ free(syms[i]);
+ free(syms);
+ }
+}
+
void test_kprobe_multi_test(void)
{
if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
@@ -320,4 +468,6 @@ void test_kprobe_multi_test(void)
test_attach_api_syms();
if (test__start_subtest("attach_api_fails"))
test_attach_api_fails();
+ if (test__start_subtest("bench_attach"))
+ test_bench_attach();
}
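For reference, the two attach modes this test exercises, pattern-based and explicit symbol list, both via bpf_program__attach_kprobe_multi_opts() (syms/cnt as built by get_syms() above):

	/* pattern-based, as in test_attach_api("ksys_*", ...) */
	link = bpf_program__attach_kprobe_multi_opts(prog, "ksys_*", NULL);

	/* explicit symbol array, as in test_bench_attach() */
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = (const char **)syms,
		.cnt = cnt,
	);
	link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);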
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
index f6933b06daf8..1d7a2f1e0731 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
@@ -138,12 +138,16 @@ cleanup:
test_ksyms_weak_lskel__destroy(skel);
}
-static void test_write_check(void)
+static void test_write_check(bool test_handler1)
{
struct test_ksyms_btf_write_check *skel;
- skel = test_ksyms_btf_write_check__open_and_load();
- ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n");
+ skel = test_ksyms_btf_write_check__open();
+ if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open"))
+ return;
+ bpf_program__set_autoload(test_handler1 ? skel->progs.handler2 : skel->progs.handler1, false);
+ ASSERT_ERR(test_ksyms_btf_write_check__load(skel),
+ "unexpected load of a prog writing to ksym memory\n");
test_ksyms_btf_write_check__destroy(skel);
}
@@ -179,6 +183,9 @@ void test_ksyms_btf(void)
if (test__start_subtest("weak_ksyms_lskel"))
test_weak_syms_lskel();
- if (test__start_subtest("write_check"))
- test_write_check();
+ if (test__start_subtest("write_check1"))
+ test_write_check(true);
+
+ if (test__start_subtest("write_check2"))
+ test_write_check(false);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
new file mode 100644
index 000000000000..93e9cddaadcf
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <ctype.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+/*
+ * Utility function uppercasing an entire string.
+ */
+static void uppercase(char *s)
+{
+ for (; *s != '\0'; s++)
+ *s = toupper(*s);
+}
+
+/*
+ * Test case to check that all bpf_attach_type variants are covered by
+ * libbpf_bpf_attach_type_str.
+ */
+static void test_libbpf_bpf_attach_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_attach_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_attach_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_attach_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_attach_type attach_type = (enum bpf_attach_type)e->val;
+ const char *attach_type_name;
+ const char *attach_type_str;
+ char buf[256];
+
+ if (attach_type == __MAX_BPF_ATTACH_TYPE)
+ continue;
+
+ attach_type_name = btf__str_by_offset(btf, e->name_off);
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ ASSERT_OK_PTR(attach_type_str, attach_type_name);
+
+ snprintf(buf, sizeof(buf), "BPF_%s", attach_type_str);
+ uppercase(buf);
+
+ ASSERT_STREQ(buf, attach_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Test case to check that all bpf_link_type variants are covered by
+ * libbpf_bpf_link_type_str.
+ */
+static void test_libbpf_bpf_link_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_link_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_link_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_link_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_link_type link_type = (enum bpf_link_type)e->val;
+ const char *link_type_name;
+ const char *link_type_str;
+ char buf[256];
+
+ if (link_type == MAX_BPF_LINK_TYPE)
+ continue;
+
+ link_type_name = btf__str_by_offset(btf, e->name_off);
+ link_type_str = libbpf_bpf_link_type_str(link_type);
+ ASSERT_OK_PTR(link_type_str, link_type_name);
+
+ snprintf(buf, sizeof(buf), "BPF_LINK_TYPE_%s", link_type_str);
+ uppercase(buf);
+
+ ASSERT_STREQ(buf, link_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Test case to check that all bpf_map_type variants are covered by
+ * libbpf_bpf_map_type_str.
+ */
+static void test_libbpf_bpf_map_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_map_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_map_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_map_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_map_type map_type = (enum bpf_map_type)e->val;
+ const char *map_type_name;
+ const char *map_type_str;
+ char buf[256];
+
+ map_type_name = btf__str_by_offset(btf, e->name_off);
+ map_type_str = libbpf_bpf_map_type_str(map_type);
+ ASSERT_OK_PTR(map_type_str, map_type_name);
+
+ snprintf(buf, sizeof(buf), "BPF_MAP_TYPE_%s", map_type_str);
+ uppercase(buf);
+
+ ASSERT_STREQ(buf, map_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Test case to check that all bpf_prog_type variants are covered by
+ * libbpf_bpf_prog_type_str.
+ */
+static void test_libbpf_bpf_prog_type_str(void)
+{
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_enum *e;
+ int i, n, id;
+
+ btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
+ if (!ASSERT_OK_PTR(btf, "btf_parse"))
+ return;
+
+ /* find enum bpf_prog_type and enumerate each value */
+ id = btf__find_by_name_kind(btf, "bpf_prog_type", BTF_KIND_ENUM);
+ if (!ASSERT_GT(id, 0, "bpf_prog_type_id"))
+ goto cleanup;
+ t = btf__type_by_id(btf, id);
+ e = btf_enum(t);
+ n = btf_vlen(t);
+ for (i = 0; i < n; e++, i++) {
+ enum bpf_prog_type prog_type = (enum bpf_prog_type)e->val;
+ const char *prog_type_name;
+ const char *prog_type_str;
+ char buf[256];
+
+ prog_type_name = btf__str_by_offset(btf, e->name_off);
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ ASSERT_OK_PTR(prog_type_str, prog_type_name);
+
+ snprintf(buf, sizeof(buf), "BPF_PROG_TYPE_%s", prog_type_str);
+ uppercase(buf);
+
+ ASSERT_STREQ(buf, prog_type_name, "exp_str_value");
+ }
+
+cleanup:
+ btf__free(btf);
+}
+
+/*
+ * Run all libbpf str conversion tests.
+ */
+void test_libbpf_str(void)
+{
+ if (test__start_subtest("bpf_attach_type_str"))
+ test_libbpf_bpf_attach_type_str();
+
+ if (test__start_subtest("bpf_link_type_str"))
+ test_libbpf_bpf_link_type_str();
+
+ if (test__start_subtest("bpf_map_type_str"))
+ test_libbpf_bpf_map_type_str();
+
+ if (test__start_subtest("bpf_prog_type_str"))
+ test_libbpf_bpf_prog_type_str();
+}
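All four subtests follow the same walk-the-enum pattern and differ only in the enum name, prefix, and *_str function; a hedged sketch of the common core they could share (check_one() is a hypothetical callback):

	static void for_each_enum_val(const char *enum_name,
				      void (*check_one)(const struct btf_enum *e))
	{
		struct btf *btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
		const struct btf_type *t;
		const struct btf_enum *e;
		int i, id;

		if (!ASSERT_OK_PTR(btf, "btf_parse"))
			return;

		id = btf__find_by_name_kind(btf, enum_name, BTF_KIND_ENUM);
		if (ASSERT_GT(id, 0, enum_name)) {
			t = btf__type_by_id(btf, id);
			for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++)
				check_one(e);
		}
		btf__free(btf);
	}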
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
index e9916f2817ec..cad664546912 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
@@ -14,6 +14,12 @@ void test_linked_funcs(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
+ /* handler1 and handler2 are marked as SEC("?raw_tp/sys_enter") and
+ * are set to not autoload by default
+ */
+ bpf_program__set_autoload(skel->progs.handler1, true);
+ bpf_program__set_autoload(skel->progs.handler2, true);
+
skel->rodata->my_tid = syscall(SYS_gettid);
skel->bss->syscall_id = SYS_getpgid;
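The '?' prefix in a SEC() name declares a BPF program as optional: libbpf parses it but leaves autoload off until the caller flips it on, which is exactly what the two set_autoload calls above do. BPF-side sketch:

	SEC("?raw_tp/sys_enter")	/* '?' => autoload defaults to false */
	int handler1(void *ctx)
	{
		return 0;
	}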
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
new file mode 100644
index 000000000000..f4ffdcabf4e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_fixup.skel.h"
+
+enum trunc_type {
+ TRUNC_NONE,
+ TRUNC_PARTIAL,
+ TRUNC_FULL,
+};
+
+static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo, true);
+ memset(log_buf, 0, sizeof(log_buf));
+ bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ "0: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_sz> ",
+ "log_buf_part1");
+
+ switch (trunc_type) {
+ case TRUNC_NONE:
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
+ "log_buf_end");
+ break;
+ case TRUNC_PARTIAL:
+ /* we should get the full message patched in by libbpf */
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+ /* we shouldn't get the full end of the BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ case TRUNC_FULL:
+ /* we shouldn't get the second part of the message patched in by libbpf */
+ ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"),
+ "log_buf_part2");
+ /* we shouldn't get the full end of the BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ }
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+static void bad_core_relo_subprog(void)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo_subprog, true);
+ bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ ": <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_off> ",
+ "log_buf");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n",
+ "log_buf");
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+static void missing_map(void)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_map__set_autocreate(skel->maps.missing_map, false);
+
+ bpf_program__set_autoload(skel->progs.use_missing_map, true);
+ bpf_program__set_log_buf(skel->progs.use_missing_map, log_buf, sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_TRUE(bpf_map__autocreate(skel->maps.existing_map), "existing_map_autocreate");
+ ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate");
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ "8: <invalid BPF map reference>\n"
+ "BPF map 'missing_map' is referenced but wasn't created\n",
+ "log_buf");
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+void test_log_fixup(void)
+{
+ if (test__start_subtest("bad_core_relo_trunc_none"))
+ bad_core_relo(0, TRUNC_NONE /* full buf */);
+ if (test__start_subtest("bad_core_relo_trunc_partial"))
+ bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
+ if (test__start_subtest("bad_core_relo_trunc_full"))
+ bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */);
+ if (test__start_subtest("bad_core_relo_subprog"))
+ bad_core_relo_subprog();
+ if (test__start_subtest("missing_map"))
+ missing_map();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c
index beebfa9730e1..a767bb4a271c 100644
--- a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c
+++ b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c
@@ -112,7 +112,8 @@ static void test_lookup_and_delete_hash(void)
/* Lookup and delete element. */
key = 1;
- err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), &value, sizeof(value), 0);
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
@@ -147,7 +148,8 @@ static void test_lookup_and_delete_percpu_hash(void)
/* Lookup and delete element. */
key = 1;
- err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), value, sizeof(value), 0);
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
@@ -191,7 +193,8 @@ static void test_lookup_and_delete_lru_hash(void)
goto cleanup;
/* Lookup and delete element 3. */
- err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), &value, sizeof(value), 0);
if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
@@ -240,10 +243,10 @@ static void test_lookup_and_delete_lru_percpu_hash(void)
value[i] = 0;
/* Lookup and delete element 3. */
- err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
- if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) {
+ err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+ &key, sizeof(key), value, sizeof(value), 0);
+ if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
goto cleanup;
- }
/* Check if only one CPU has set the value. */
for (i = 0; i < nr_cpus; i++) {
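The calls above migrate from the fd-based bpf_map_lookup_and_delete_elem(map_fd, key, value) to the bpf_map__* variant, which takes the struct bpf_map pointer plus explicit sizes so libbpf can sanity-check them. Its signature, for reference (libbpf 1.0-era API; treat as a sketch):

	int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
					    const void *key, size_t key_sz,
					    void *value, size_t value_sz,
					    __u64 flags);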
diff --git a/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
new file mode 100644
index 000000000000..1102e4f42d2d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "lsm_cgroup.skel.h"
+#include "lsm_cgroup_nonvoid.skel.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
+static struct btf *btf;
+
+static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, p);
+ int cnt = 0;
+ int i;
+
+ ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
+
+ if (!attach_func)
+ return p.prog_cnt;
+
+ /* When attach_func is provided, count the number of progs that
+ * attach to the given symbol.
+ */
+
+ if (!btf)
+ btf = btf__load_vmlinux_btf();
+ if (!ASSERT_OK(libbpf_get_error(btf), "btf_vmlinux"))
+ return -1;
+
+ p.prog_ids = malloc(sizeof(u32) * p.prog_cnt);
+ p.prog_attach_flags = malloc(sizeof(u32) * p.prog_cnt);
+ ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");
+
+ for (i = 0; i < p.prog_cnt; i++) {
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ int fd;
+
+ fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
+ ASSERT_GE(fd, 0, "prog_get_fd_by_id");
+ ASSERT_OK(bpf_obj_get_info_by_fd(fd, &info, &info_len), "prog_info_by_fd");
+ close(fd);
+
+ if (info.attach_btf_id ==
+ btf__find_by_name_kind(btf, attach_func, BTF_KIND_FUNC))
+ cnt++;
+ }
+
+ free(p.prog_ids);
+ free(p.prog_attach_flags);
+
+ return cnt;
+}
+
+static void test_lsm_cgroup_functional(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
+ DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+ int cgroup_fd = -1, cgroup_fd2 = -1, cgroup_fd3 = -1;
+ int listen_fd, client_fd, accepted_fd;
+ struct lsm_cgroup *skel = NULL;
+ int post_create_prog_fd2 = -1;
+ int post_create_prog_fd = -1;
+ int bind_link_fd2 = -1;
+ int bind_prog_fd2 = -1;
+ int alloc_prog_fd = -1;
+ int bind_prog_fd = -1;
+ int bind_link_fd = -1;
+ int clone_prog_fd = -1;
+ int err, fd, prio;
+ socklen_t socklen;
+
+ cgroup_fd3 = test__join_cgroup("/sock_policy_empty");
+ if (!ASSERT_GE(cgroup_fd3, 0, "create empty cgroup"))
+ goto close_cgroup;
+
+ cgroup_fd2 = test__join_cgroup("/sock_policy_reuse");
+ if (!ASSERT_GE(cgroup_fd2, 0, "create cgroup for reuse"))
+ goto close_cgroup;
+
+ cgroup_fd = test__join_cgroup("/sock_policy");
+ if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
+ goto close_cgroup;
+
+ skel = lsm_cgroup__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "open_and_load"))
+ goto close_cgroup;
+
+ post_create_prog_fd = bpf_program__fd(skel->progs.socket_post_create);
+ post_create_prog_fd2 = bpf_program__fd(skel->progs.socket_post_create2);
+ bind_prog_fd = bpf_program__fd(skel->progs.socket_bind);
+ bind_prog_fd2 = bpf_program__fd(skel->progs.socket_bind2);
+ alloc_prog_fd = bpf_program__fd(skel->progs.socket_alloc);
+ clone_prog_fd = bpf_program__fd(skel->progs.socket_clone);
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 0, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 0, "total prog count");
+ err = bpf_prog_attach(alloc_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
+ if (err == -ENOTSUPP) {
+ test__skip();
+ goto close_cgroup;
+ }
+ if (!ASSERT_OK(err, "attach alloc_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 1, "total prog count");
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 0, "prog count");
+ err = bpf_prog_attach(clone_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
+ if (!ASSERT_OK(err, "attach clone_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 2, "total prog count");
+
+ /* Make sure replacing works. */
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 0, "prog count");
+ err = bpf_prog_attach(post_create_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP, 0);
+ if (!ASSERT_OK(err, "attach post_create_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
+
+ attach_opts.replace_prog_fd = post_create_prog_fd;
+ err = bpf_prog_attach_opts(post_create_prog_fd2, cgroup_fd,
+ BPF_LSM_CGROUP, &attach_opts);
+ if (!ASSERT_OK(err, "prog replace post_create_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");
+
+ /* Try the same attach/replace via link API. */
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 0, "prog count");
+ bind_link_fd = bpf_link_create(bind_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP, NULL);
+ if (!ASSERT_GE(bind_link_fd, 0, "link create bind_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+
+ update_opts.old_prog_fd = bind_prog_fd;
+ update_opts.flags = BPF_F_REPLACE;
+
+ err = bpf_link_update(bind_link_fd, bind_prog_fd2, &update_opts);
+ if (!ASSERT_OK(err, "link update bind_prog_fd"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+
+ /* Attach another instance of the bind program to another cgroup.
+ * This should trigger the reuse of the trampoline shim (two
+ * programs attaching to the same btf_id).
+ */
+
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 0, "prog count");
+ bind_link_fd2 = bpf_link_create(bind_prog_fd2, cgroup_fd2,
+ BPF_LSM_CGROUP, NULL);
+ if (!ASSERT_GE(bind_link_fd2, 0, "link create bind_prog_fd2"))
+ goto detach_cgroup;
+ ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 1, "prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
+ ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");
+
+ /* AF_UNIX is prohibited. */
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_LT(fd, 0, "socket(AF_UNIX)");
+ close(fd);
+
+ /* AF_INET6 gets default policy (sk_priority). */
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
+ goto detach_cgroup;
+
+ prio = 0;
+ socklen = sizeof(prio);
+ ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+ "getsockopt");
+ ASSERT_EQ(prio, 123, "sk_priority");
+
+ close(fd);
+
+ /* TX-only AF_PACKET is allowed. */
+
+ ASSERT_LT(socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)), 0,
+ "socket(AF_PACKET, ..., ETH_P_ALL)");
+
+ fd = socket(AF_PACKET, SOCK_RAW, 0);
+ ASSERT_GE(fd, 0, "socket(AF_PACKET, ..., 0)");
+
+ /* TX-only AF_PACKET cannot be rebound. */
+
+ struct sockaddr_ll sa = {
+ .sll_family = AF_PACKET,
+ .sll_protocol = htons(ETH_P_ALL),
+ };
+ ASSERT_LT(bind(fd, (struct sockaddr *)&sa, sizeof(sa)), 0,
+ "bind(ETH_P_ALL)");
+
+ close(fd);
+
+ /* Trigger passive open. */
+
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ ASSERT_GE(listen_fd, 0, "start_server");
+ client_fd = connect_to_fd(listen_fd, 0);
+ ASSERT_GE(client_fd, 0, "connect_to_fd");
+ accepted_fd = accept(listen_fd, NULL, NULL);
+ ASSERT_GE(accepted_fd, 0, "accept");
+
+ prio = 0;
+ socklen = sizeof(prio);
+ ASSERT_GE(getsockopt(accepted_fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+ "getsockopt");
+ ASSERT_EQ(prio, 234, "sk_priority");
+
+ /* These are replaced and never called. */
+ ASSERT_EQ(skel->bss->called_socket_post_create, 0, "called_create");
+ ASSERT_EQ(skel->bss->called_socket_bind, 0, "called_bind");
+
+ /* AF_INET6+SOCK_STREAM
+ * AF_PACKET+SOCK_RAW
+ * listen_fd
+ * client_fd
+ * accepted_fd
+ */
+ ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");
+
+ /* start_server
+ * bind(ETH_P_ALL)
+ */
+ ASSERT_EQ(skel->bss->called_socket_bind2, 2, "called_bind2");
+ /* Single accept(). */
+ ASSERT_EQ(skel->bss->called_socket_clone, 1, "called_clone");
+
+ /* AF_UNIX+SOCK_STREAM (failed)
+ * AF_INET6+SOCK_STREAM
+ * AF_PACKET+SOCK_RAW (failed)
+ * AF_PACKET+SOCK_RAW
+ * listen_fd
+ * client_fd
+ * accepted_fd
+ */
+ ASSERT_EQ(skel->bss->called_socket_alloc, 7, "called_alloc");
+
+ close(listen_fd);
+ close(client_fd);
+ close(accepted_fd);
+
+ /* Make sure other cgroup doesn't trigger the programs. */
+
+ if (!ASSERT_OK(join_cgroup("/sock_policy_empty"), "join root cgroup"))
+ goto detach_cgroup;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
+ goto detach_cgroup;
+
+ prio = 0;
+ socklen = sizeof(prio);
+ ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
+ "getsockopt");
+ ASSERT_EQ(prio, 0, "sk_priority");
+
+ close(fd);
+
+detach_cgroup:
+ ASSERT_GE(bpf_prog_detach2(post_create_prog_fd2, cgroup_fd,
+ BPF_LSM_CGROUP), 0, "detach_create");
+ close(bind_link_fd);
+ /* Don't close bind_link_fd2, exercise cgroup release cleanup. */
+ ASSERT_GE(bpf_prog_detach2(alloc_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP), 0, "detach_alloc");
+ ASSERT_GE(bpf_prog_detach2(clone_prog_fd, cgroup_fd,
+ BPF_LSM_CGROUP), 0, "detach_clone");
+
+close_cgroup:
+ close(cgroup_fd);
+ close(cgroup_fd2);
+ close(cgroup_fd3);
+ lsm_cgroup__destroy(skel);
+}
+
+static void test_lsm_cgroup_nonvoid(void)
+{
+ struct lsm_cgroup_nonvoid *skel = NULL;
+
+ skel = lsm_cgroup_nonvoid__open_and_load();
+ ASSERT_NULL(skel, "open succeeds");
+ lsm_cgroup_nonvoid__destroy(skel);
+}
+
+void test_lsm_cgroup(void)
+{
+ if (test__start_subtest("functional"))
+ test_lsm_cgroup_functional();
+ if (test__start_subtest("nonvoid"))
+ test_lsm_cgroup_nonvoid();
+ btf__free(btf);
+}
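The skeleton's BPF side is outside this hunk; a hypothetical minimal counterpart for one of the hooks exercised above, assuming the lsm_cgroup section convention and that a return value of 1 allows the operation:

	SEC("lsm_cgroup/socket_post_create")
	int BPF_PROG(socket_post_create, struct socket *sock, int family,
		     int type, int protocol, int kern)
	{
		return 1;	/* allow */
	}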
diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
new file mode 100644
index 000000000000..fdcea7a61491
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "map_kptr.skel.h"
+#include "map_kptr_fail.skel.h"
+
+static char log_buf[1024 * 1024];
+
+struct {
+ const char *prog_name;
+ const char *err_msg;
+} map_kptr_fail_tests[] = {
+ { "size_not_bpf_dw", "kptr access size must be BPF_DW" },
+ { "non_const_var_off", "kptr access cannot have variable offset" },
+ { "non_const_var_off_kptr_xchg", "R1 doesn't have constant offset. kptr has to be" },
+ { "misaligned_access_write", "kptr access misaligned expected=8 off=7" },
+ { "misaligned_access_read", "kptr access misaligned expected=8 off=1" },
+ { "reject_var_off_store", "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" },
+ { "reject_bad_type_match", "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc" },
+ { "marked_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
+ { "correct_btf_id_check_size", "access beyond struct prog_test_ref_kfunc at off 32 size 4" },
+ { "inherit_untrusted_on_walk", "R1 type=untrusted_ptr_ expected=percpu_ptr_" },
+ { "reject_kptr_xchg_on_unref", "off=8 kptr isn't referenced kptr" },
+ { "reject_kptr_get_no_map_val", "arg#0 expected pointer to map value" },
+ { "reject_kptr_get_no_null_map_val", "arg#0 expected pointer to map value" },
+ { "reject_kptr_get_no_kptr", "arg#0 no referenced kptr at map value offset=0" },
+ { "reject_kptr_get_on_unref", "arg#0 no referenced kptr at map value offset=8" },
+ { "reject_kptr_get_bad_type_match", "kernel function bpf_kfunc_call_test_kptr_get args#0" },
+ { "mark_ref_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" },
+ { "reject_untrusted_store_to_ref", "store to referenced kptr disallowed" },
+ { "reject_bad_type_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member" },
+ { "reject_untrusted_xchg", "R2 type=untrusted_ptr_ expected=ptr_" },
+ { "reject_member_of_ref_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" },
+ { "reject_indirect_helper_access", "kptr cannot be accessed indirectly by helper" },
+ { "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" },
+ { "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" },
+ { "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" },
+};
+
+static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
+ .kernel_log_size = sizeof(log_buf),
+ .kernel_log_level = 1);
+ struct map_kptr_fail *skel;
+ struct bpf_program *prog;
+ int ret;
+
+ skel = map_kptr_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "map_kptr_fail__open_opts"))
+ return;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto end;
+
+ bpf_program__set_autoload(prog, true);
+
+ ret = map_kptr_fail__load(skel);
+ if (!ASSERT_ERR(ret, "map_kptr__load must fail"))
+ goto end;
+
+ if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
+ fprintf(stderr, "Expected: %s\n", err_msg);
+ fprintf(stderr, "Verifier: %s\n", log_buf);
+ }
+
+end:
+ map_kptr_fail__destroy(skel);
+}
+
+static void test_map_kptr_fail(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map_kptr_fail_tests); i++) {
+ if (!test__start_subtest(map_kptr_fail_tests[i].prog_name))
+ continue;
+ test_map_kptr_fail_prog(map_kptr_fail_tests[i].prog_name,
+ map_kptr_fail_tests[i].err_msg);
+ }
+}
+
+static void test_map_kptr_success(bool test_run)
+{
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+ struct map_kptr *skel;
+ int key = 0, ret;
+ char buf[16];
+
+ skel = map_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
+ return;
+
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref retval");
+ ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
+ ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
+ ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
+
+ if (test_run)
+ return;
+
+ ret = bpf_map__update_elem(skel->maps.array_map,
+ &key, sizeof(key), buf, sizeof(buf), 0);
+ ASSERT_OK(ret, "array_map update");
+ ret = bpf_map__update_elem(skel->maps.array_map,
+ &key, sizeof(key), buf, sizeof(buf), 0);
+ ASSERT_OK(ret, "array_map update2");
+
+ ret = bpf_map__update_elem(skel->maps.hash_map,
+ &key, sizeof(key), buf, sizeof(buf), 0);
+ ASSERT_OK(ret, "hash_map update");
+ ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "hash_map delete");
+
+ ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
+ &key, sizeof(key), buf, sizeof(buf), 0);
+ ASSERT_OK(ret, "hash_malloc_map update");
+ ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "hash_malloc_map delete");
+
+ ret = bpf_map__update_elem(skel->maps.lru_hash_map,
+ &key, sizeof(key), buf, sizeof(buf), 0);
+ ASSERT_OK(ret, "lru_hash_map update");
+ ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
+ ASSERT_OK(ret, "lru_hash_map delete");
+
+ map_kptr__destroy(skel);
+}
+
+void test_map_kptr(void)
+{
+ if (test__start_subtest("success")) {
+ test_map_kptr_success(false);
+ /* Do test_run twice, so that we see refcount going back to 1
+ * after we leave it in map from first iteration.
+ */
+ test_map_kptr_success(true);
+ }
+ test_map_kptr_fail();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
new file mode 100644
index 000000000000..bfb1bf3fd427
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include <test_progs.h>
+#include "test_map_lookup_percpu_elem.skel.h"
+
+void test_map_lookup_percpu_elem(void)
+{
+ struct test_map_lookup_percpu_elem *skel;
+ __u64 key = 0, sum;
+ int ret, i, nr_cpus = libbpf_num_possible_cpus();
+ __u64 *buf;
+
+ buf = malloc(nr_cpus * sizeof(__u64));
+ if (!ASSERT_OK_PTR(buf, "malloc"))
+ return;
+
+ for (i = 0; i < nr_cpus; i++)
+ buf[i] = i;
+ sum = (nr_cpus - 1) * nr_cpus / 2;
+
+ skel = test_map_lookup_percpu_elem__open();
+ if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open"))
+ goto exit;
+
+ skel->rodata->my_pid = getpid();
+ skel->rodata->nr_cpus = nr_cpus;
+
+ ret = test_map_lookup_percpu_elem__load(skel);
+ if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load"))
+ goto cleanup;
+
+ ret = test_map_lookup_percpu_elem__attach(skel);
+ if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
+ goto cleanup;
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
+ ASSERT_OK(ret, "percpu_array_map update");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "percpu_hash_map update");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "percpu_lru_hash_map update");
+
+ syscall(__NR_getuid);
+
+ test_map_lookup_percpu_elem__detach(skel);
+
+ ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem");
+ ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem");
+ ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem");
+
+cleanup:
+ test_map_lookup_percpu_elem__destroy(skel);
+exit:
+ free(buf);
+}
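For per-CPU maps, one userspace update writes a value for every possible CPU at once, which is why buf holds nr_cpus elements and the expected total is 0 + 1 + ... + (nr_cpus - 1) = nr_cpus * (nr_cpus - 1) / 2. A sketch of the layout (names match the test above):

	/* buf[i] becomes the value stored in CPU i's slot for this key */
	for (i = 0; i < nr_cpus; i++)
		buf[i] = i;
	bpf_map_update_elem(map_fd, &key, buf, 0);	/* all CPUs in one call */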
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
new file mode 100644
index 000000000000..59f08d6d1d53
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Tessares SA. */
+/* Copyright (c) 2022, SUSE. */
+
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "mptcp_sock.skel.h"
+
+#ifndef TCP_CA_NAME_MAX
+#define TCP_CA_NAME_MAX 16
+#endif
+
+struct mptcp_storage {
+ __u32 invoked;
+ __u32 is_mptcp;
+ struct sock *sk;
+ __u32 token;
+ struct sock *first;
+ char ca_name[TCP_CA_NAME_MAX];
+};
+
+static int verify_tsk(int map_fd, int client_fd)
+{
+ int err, cfd = client_fd;
+ struct mptcp_storage val;
+
+ err = bpf_map_lookup_elem(map_fd, &cfd, &val);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ return err;
+
+ if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
+ err++;
+
+ if (!ASSERT_EQ(val.is_mptcp, 0, "unexpected is_mptcp"))
+ err++;
+
+ return err;
+}
+
+static void get_msk_ca_name(char ca_name[])
+{
+ ssize_t len;
+ int fd;
+
+ fd = open("/proc/sys/net/ipv4/tcp_congestion_control", O_RDONLY);
+ if (!ASSERT_GE(fd, 0, "failed to open tcp_congestion_control"))
+ return;
+
+ len = read(fd, ca_name, TCP_CA_NAME_MAX);
+ if (!ASSERT_GT(len, 0, "failed to read ca_name"))
+ goto err;
+
+ if (len > 0 && ca_name[len - 1] == '\n')
+ ca_name[len - 1] = '\0';
+
+err:
+ close(fd);
+}
+
+static int verify_msk(int map_fd, int client_fd, __u32 token)
+{
+ char ca_name[TCP_CA_NAME_MAX];
+ int err, cfd = client_fd;
+ struct mptcp_storage val;
+
+ if (!ASSERT_GT(token, 0, "invalid token"))
+ return -1;
+
+ get_msk_ca_name(ca_name);
+
+ err = bpf_map_lookup_elem(map_fd, &cfd, &val);
+ if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+ return err;
+
+ if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
+ err++;
+
+ if (!ASSERT_EQ(val.is_mptcp, 1, "unexpected is_mptcp"))
+ err++;
+
+ if (!ASSERT_EQ(val.token, token, "unexpected token"))
+ err++;
+
+ if (!ASSERT_EQ(val.first, val.sk, "unexpected first"))
+ err++;
+
+ if (!ASSERT_STRNEQ(val.ca_name, ca_name, TCP_CA_NAME_MAX, "unexpected ca_name"))
+ err++;
+
+ return err;
+}
+
+static int run_test(int cgroup_fd, int server_fd, bool is_mptcp)
+{
+ int client_fd, prog_fd, map_fd, err;
+ struct mptcp_sock *sock_skel;
+
+ sock_skel = mptcp_sock__open_and_load();
+ if (!ASSERT_OK_PTR(sock_skel, "skel_open_load"))
+ return -EIO;
+
+ err = mptcp_sock__attach(sock_skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto out;
+
+ prog_fd = bpf_program__fd(sock_skel->progs._sockops);
+ if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) {
+ err = -EIO;
+ goto out;
+ }
+
+ map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map);
+ if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) {
+ err = -EIO;
+ goto out;
+ }
+
+ err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ client_fd = connect_to_fd(server_fd, 0);
+ if (!ASSERT_GE(client_fd, 0, "connect to fd")) {
+ err = -EIO;
+ goto out;
+ }
+
+ err += is_mptcp ? verify_msk(map_fd, client_fd, sock_skel->bss->token) :
+ verify_tsk(map_fd, client_fd);
+
+ close(client_fd);
+
+out:
+ mptcp_sock__destroy(sock_skel);
+ return err;
+}
+
+static void test_base(void)
+{
+ int server_fd, cgroup_fd;
+
+ cgroup_fd = test__join_cgroup("/mptcp");
+ if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup"))
+ return;
+
+ /* without MPTCP */
+ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto with_mptcp;
+
+ ASSERT_OK(run_test(cgroup_fd, server_fd, false), "run_test tcp");
+
+ close(server_fd);
+
+with_mptcp:
+ /* with MPTCP */
+ server_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_mptcp_server"))
+ goto close_cgroup_fd;
+
+ ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp");
+
+ close(server_fd);
+
+close_cgroup_fd:
+ close(cgroup_fd);
+}
+
+void test_mptcp(void)
+{
+ if (test__start_subtest("base"))
+ test_base();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c
index 954964f0ac3d..d3915c58d0e1 100644
--- a/tools/testing/selftests/bpf/prog_tests/netcnt.c
+++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c
@@ -25,7 +25,7 @@ void serial_test_netcnt(void)
if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load"))
return;
- nproc = get_nprocs_conf();
+ nproc = bpf_num_possible_cpus();
percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc);
if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)"))
goto err;
diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c
index abf890d066eb..34dbd2adc157 100644
--- a/tools/testing/selftests/bpf/prog_tests/probe_user.c
+++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c
@@ -4,25 +4,35 @@
/* TODO: corrupts other tests that use connect() */
void serial_test_probe_user(void)
{
- const char *prog_name = "handle_sys_connect";
+ static const char *const prog_names[] = {
+ "handle_sys_connect",
+#if defined(__s390x__)
+ "handle_sys_socketcall",
+#endif
+ };
+ enum { prog_count = ARRAY_SIZE(prog_names) };
const char *obj_file = "./test_probe_user.o";
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, );
int err, results_map_fd, sock_fd, duration = 0;
struct sockaddr curr, orig, tmp;
struct sockaddr_in *in = (struct sockaddr_in *)&curr;
- struct bpf_link *kprobe_link = NULL;
- struct bpf_program *kprobe_prog;
+ struct bpf_link *kprobe_links[prog_count] = {};
+ struct bpf_program *kprobe_progs[prog_count];
struct bpf_object *obj;
static const int zero = 0;
+ size_t i;
obj = bpf_object__open_file(obj_file, &opts);
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
return;
- kprobe_prog = bpf_object__find_program_by_name(obj, prog_name);
- if (CHECK(!kprobe_prog, "find_probe",
- "prog '%s' not found\n", prog_name))
- goto cleanup;
+ for (i = 0; i < prog_count; i++) {
+ kprobe_progs[i] =
+ bpf_object__find_program_by_name(obj, prog_names[i]);
+ if (CHECK(!kprobe_progs[i], "find_probe",
+ "prog '%s' not found\n", prog_names[i]))
+ goto cleanup;
+ }
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
@@ -33,9 +43,11 @@ void serial_test_probe_user(void)
"err %d\n", results_map_fd))
goto cleanup;
- kprobe_link = bpf_program__attach(kprobe_prog);
- if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
- goto cleanup;
+ for (i = 0; i < prog_count; i++) {
+ kprobe_links[i] = bpf_program__attach(kprobe_progs[i]);
+ if (!ASSERT_OK_PTR(kprobe_links[i], "attach_kprobe"))
+ goto cleanup;
+ }
memset(&curr, 0, sizeof(curr));
in->sin_family = AF_INET;
@@ -69,6 +81,7 @@ void serial_test_probe_user(void)
inet_ntoa(in->sin_addr), ntohs(in->sin_port)))
goto cleanup;
cleanup:
- bpf_link__destroy(kprobe_link);
+ for (i = 0; i < prog_count; i++)
+ bpf_link__destroy(kprobe_links[i]);
bpf_object__close(obj);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
new file mode 100644
index 000000000000..14f2796076e0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void clear_test_state(struct test_state *state)
+{
+ state->error_cnt = 0;
+ state->sub_succ_cnt = 0;
+ state->skip_cnt = 0;
+}
+
+void test_prog_tests_framework(void)
+{
+ struct test_state *state = env.test_state;
+
+ /* in all the ASSERT calls below we need to return on the first
+ * error because we clean the test state after each dummy subtest
+ */
+
+ /* test we properly count skipped tests with subtests */
+ if (test__start_subtest("test_good_subtest"))
+ test__end_subtest();
+ if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_fail_subtest")) {
+ test__fail();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 873323fb18ba..739d2ea6ca55 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -1,21 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
-static void toggle_object_autoload_progs(const struct bpf_object *obj,
- const char *name_load)
-{
- struct bpf_program *prog;
-
- bpf_object__for_each_program(prog, obj) {
- const char *name = bpf_program__name(prog);
-
- if (!strcmp(name_load, name))
- bpf_program__set_autoload(prog, true);
- else
- bpf_program__set_autoload(prog, false);
- }
-}
-
void test_reference_tracking(void)
{
const char *file = "test_sk_lookup_kern.o";
@@ -39,6 +24,7 @@ void test_reference_tracking(void)
goto cleanup;
bpf_object__for_each_program(prog, obj_iter) {
+ struct bpf_program *p;
const char *name;
name = bpf_program__name(prog);
@@ -49,7 +35,12 @@ void test_reference_tracking(void)
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
goto cleanup;
- toggle_object_autoload_progs(obj, name);
+ /* no programs are loaded by default, so just set
+ * autoload to true for the single prog under test
+ */
+ p = bpf_object__find_program_by_name(obj, name);
+ bpf_program__set_autoload(p, true);
+
/* Expect verifier failure if test name has 'err' */
if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
libbpf_print_fn_t old_print_fn;
diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
index f4a13d9dd5c8..c197261d02e2 100644
--- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
+++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
@@ -44,7 +44,7 @@ BTF_ID(union, U)
BTF_ID(func, func)
extern __u32 test_list_global[];
-BTF_ID_LIST_GLOBAL(test_list_global)
+BTF_ID_LIST_GLOBAL(test_list_global, 1)
BTF_ID_UNUSED
BTF_ID(typedef, S)
BTF_ID(typedef, T)
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
index e945195b24c9..1455911d9fcb 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
@@ -50,17 +50,12 @@ void test_ringbuf_multi(void)
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
- err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size);
- if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
- goto cleanup;
-
- err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size);
- if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
- goto cleanup;
-
- err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size);
- if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
- goto cleanup;
+ /* validate ringbuf size adjustment logic */
+ ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before");
+ ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize");
+ ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after");
+ ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset");
+ ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final");
proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL);
if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n"))
@@ -77,6 +72,10 @@ void test_ringbuf_multi(void)
close(proto_fd);
proto_fd = -1;
+ /* make sure we can't resize ringbuf after object load */
+ if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_load"))
+ goto cleanup;
+
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
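The assertions above rely on libbpf auto-adjusting a ringbuf's max_entries: the kernel requires the size to be a page-aligned power of two, so out-of-spec values are rounded up at set time, and resizing is refused once the object is loaded. For instance (assuming that rounding behavior, with 4 KiB pages):

	/* 3 * page_size = 12288 is not a power of two ... */
	bpf_map__set_max_entries(rb, 3 * page_size);
	/* ... so bpf_map__max_entries(rb) now reports 4 * page_size */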
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index d71226e34c34..d63a20fbed33 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -64,7 +64,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
/* wait a little for signal handler */
- for (int i = 0; i < 100000000 && !sigusr1_received; i++)
+ for (int i = 0; i < 1000000000 && !sigusr1_received; i++)
j /= i + j + 1;
buf[0] = sigusr1_received ? '2' : '0';
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
new file mode 100644
index 000000000000..d7f83c0a40a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "skb_load_bytes.skel.h"
+
+void test_skb_load_bytes(void)
+{
+ struct skb_load_bytes *skel;
+ int err, prog_fd, test_result;
+ struct __sk_buff skb = { 0 };
+
+ LIBBPF_OPTS(bpf_test_run_opts, tattr,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ skel = skb_load_bytes__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.skb_process);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto out;
+
+ skel->bss->load_offset = (uint32_t)(-1);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, -EFAULT, "offset -1"))
+ goto out;
+
+ skel->bss->load_offset = (uint32_t)10;
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, 0, "offset 10"))
+ goto out;
+
+out:
+ skb_load_bytes__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c
index 180afd632f4c..99dac5292b41 100644
--- a/tools/testing/selftests/bpf/prog_tests/skeleton.c
+++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c
@@ -122,6 +122,8 @@ void test_skeleton(void)
ASSERT_EQ(skel->bss->out_mostly_var, 123, "out_mostly_var");
+ ASSERT_EQ(bss->huge_arr[ARRAY_SIZE(bss->huge_arr) - 1], 123, "huge_arr");
+
elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz);
ASSERT_OK_PTR(elf_bytes, "elf_bytes");
ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz");
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 394ebfc3bbf3..4be6fdb78c6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -83,8 +83,6 @@ cleanup:
test_snprintf__destroy(skel);
}
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
/* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */
static int load_single_snprintf(char *fmt)
{
@@ -95,7 +93,7 @@ static int load_single_snprintf(char *fmt)
if (!skel)
return -EINVAL;
- memcpy(skel->rodata->fmt, fmt, min(strlen(fmt) + 1, 10));
+ memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10));
ret = test_snprintf_single__load(skel);
test_snprintf_single__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
index 9d211b5c22c4..7d23166c77af 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
@@ -394,7 +394,6 @@ void serial_test_sock_fields(void)
test();
done:
- test_sock_fields__detach(skel);
test_sock_fields__destroy(skel);
if (child_cg_fd >= 0)
close(child_cg_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
index af293ea1542c..e172d89e92e1 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -4,6 +4,7 @@
* Tests for sockmap/sockhash holding kTLS sockets.
*/
+#include <netinet/tcp.h>
#include "test_progs.h"
#define MAX_TEST_NAME 80
@@ -92,9 +93,78 @@ close_srv:
close(srv);
}
+static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map)
+{
+ struct sockaddr_storage addr = {};
+ socklen_t len = sizeof(addr);
+ struct sockaddr_in6 *v6;
+ struct sockaddr_in *v4;
+ int err, s, zero = 0;
+
+ switch (family) {
+ case AF_INET:
+ v4 = (struct sockaddr_in *)&addr;
+ v4->sin_family = AF_INET;
+ break;
+ case AF_INET6:
+ v6 = (struct sockaddr_in6 *)&addr;
+ v6->sin6_family = AF_INET6;
+ break;
+ default:
+ PRINT_FAIL("unsupported socket family %d", family);
+ return;
+ }
+
+ s = socket(family, SOCK_STREAM, 0);
+ if (!ASSERT_GE(s, 0, "socket"))
+ return;
+
+ err = bind(s, (struct sockaddr *)&addr, len);
+ if (!ASSERT_OK(err, "bind"))
+ goto close;
+
+ err = getsockname(s, (struct sockaddr *)&addr, &len);
+ if (!ASSERT_OK(err, "getsockname"))
+ goto close;
+
+ err = connect(s, (struct sockaddr *)&addr, len);
+ if (!ASSERT_OK(err, "connect"))
+ goto close;
+
+ /* save sk->sk_prot and set it to tls_prots */
+ err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+ if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
+ goto close;
+
+ /* sockmap update should not affect saved sk_prot */
+ err = bpf_map_update_elem(map, &zero, &s, BPF_ANY);
+ if (!ASSERT_ERR(err, "sockmap update elem"))
+ goto close;
+
+ /* call sk->sk_prot->setsockopt to dispatch to saved sk_prot */
+ err = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero));
+ ASSERT_OK(err, "setsockopt(TCP_NODELAY)");
+
+close:
+ close(s);
+}
+
+static const char *fmt_test_name(const char *subtest_name, int family,
+ enum bpf_map_type map_type)
+{
+ const char *map_type_str = map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH";
+ const char *family_str = family == AF_INET ? "IPv4" : "IPv6";
+ static char test_name[MAX_TEST_NAME];
+
+ snprintf(test_name, MAX_TEST_NAME,
+ "sockmap_ktls %s %s %s",
+ subtest_name, family_str, map_type_str);
+
+ return test_name;
+}
+
static void run_tests(int family, enum bpf_map_type map_type)
{
- char test_name[MAX_TEST_NAME];
int map;
map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
@@ -103,14 +173,10 @@ static void run_tests(int family, enum bpf_map_type map_type)
return;
}
- snprintf(test_name, MAX_TEST_NAME,
- "sockmap_ktls disconnect_after_delete %s %s",
- family == AF_INET ? "IPv4" : "IPv6",
- map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH");
- if (!test__start_subtest(test_name))
- return;
-
- test_sockmap_ktls_disconnect_after_delete(family, map);
+ if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type)))
+ test_sockmap_ktls_disconnect_after_delete(family, map);
+ if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type)))
+ test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map);
close(map);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
index e8399ae50e77..9ad09a6c538a 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -8,7 +8,7 @@ void test_stacktrace_build_id(void)
int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
struct test_stacktrace_build_id *skel;
int err, stack_trace_len;
- __u32 key, previous_key, val, duration = 0;
+ __u32 key, prev_key, val, duration = 0;
char buf[256];
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -58,7 +58,7 @@ retry:
"err %d errno %d\n", err, errno))
goto cleanup;
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
@@ -79,8 +79,8 @@ retry:
if (strstr(buf, build_id) != NULL)
build_id_matches = 1;
}
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ prev_key = key;
+ } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
/* stack_map_get_build_id_offset() is racy and sometimes can return
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index f45a1d7b0a28..f4ea1a215ce4 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -27,7 +27,7 @@ void test_stacktrace_build_id_nmi(void)
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
};
- __u32 key, previous_key, val, duration = 0;
+ __u32 key, prev_key, val, duration = 0;
char buf[256];
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -100,7 +100,7 @@ retry:
"err %d errno %d\n", err, errno))
goto cleanup;
- err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+ err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
if (CHECK(err, "get_next_key from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
@@ -108,7 +108,8 @@ retry:
do {
char build_id[64];
- err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+ err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
+ id_offs, sizeof(id_offs), 0);
if (CHECK(err, "lookup_elem from stackmap",
"err %d, errno %d\n", err, errno))
goto cleanup;
@@ -121,8 +122,8 @@ retry:
if (strstr(buf, build_id) != NULL)
build_id_matches = 1;
}
- previous_key = key;
- } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ prev_key = key;
+ } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
/* stack_map_get_build_id_offset() is racy and sometimes can return
* BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index c4da87ec3ba4..19c70880cfb3 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -831,6 +831,59 @@ out:
bpf_object__close(obj);
}
+#include "tailcall_bpf2bpf6.skel.h"
+
+/* Tail call counting works even when there is data on the stack that is
+ * not aligned to 8 bytes.
+ */
+static void test_tailcall_bpf2bpf_6(void)
+{
+ struct tailcall_bpf2bpf6 *obj;
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ LIBBPF_OPTS(bpf_test_run_opts, topts,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .repeat = 1,
+ );
+
+ obj = tailcall_bpf2bpf6__open_and_load();
+ if (!ASSERT_OK_PTR(obj, "open and load"))
+ return;
+
+ main_fd = bpf_program__fd(obj->progs.entry);
+ if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
+ goto out;
+
+ map_fd = bpf_map__fd(obj->maps.jmp_table);
+ if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
+ goto out;
+
+ prog_fd = bpf_program__fd(obj->progs.classifier_0);
+ if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (!ASSERT_OK(err, "jmp_table map update"))
+ goto out;
+
+ err = bpf_prog_test_run_opts(main_fd, &topts);
+ ASSERT_OK(err, "entry prog test run");
+ ASSERT_EQ(topts.retval, 0, "tailcall retval");
+
+ data_fd = bpf_map__fd(obj->maps.bss);
+ if (!ASSERT_GE(map_fd, 0, "bss map fd"))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ ASSERT_OK(err, "bss map lookup");
+ ASSERT_EQ(val, 1, "done flag is set");
+
+out:
+ tailcall_bpf2bpf6__destroy(obj);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -855,4 +908,6 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_4(false);
if (test__start_subtest("tailcall_bpf2bpf_5"))
test_tailcall_bpf2bpf_4(true);
+ if (test__start_subtest("tailcall_bpf2bpf_6"))
+ test_tailcall_bpf2bpf_6();
}
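The tailcall_bpf2bpf6 BPF side is outside this hunk; a hypothetical shape of its entry program, assuming an odd-sized stack object is what produces the non-8-byte-aligned stack mentioned in the comment above (jmp_table is the prog-array map the test populates):

	SEC("tc")
	int entry(struct __sk_buff *skb)
	{
		/* odd-sized stack data: stack depth not a multiple of 8 */
		volatile char ch = 0;

		bpf_tail_call_static(skb, &jmp_table, 0);
		return ch;
	}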
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 7ad66a247c02..cb6a53b3e023 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -646,7 +646,7 @@ static void test_tcp_clear_dtime(struct test_tc_dtime *skel)
__u32 *errs = skel->bss->errs[t];
skel->bss->test = t;
- test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 0);
+ test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 50000 + t);
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
@@ -683,7 +683,7 @@ static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
errs = skel->bss->errs[t];
skel->bss->test = t;
- test_inet_dtime(family, SOCK_STREAM, addr, 0);
+ test_inet_dtime(family, SOCK_STREAM, addr, 50000 + t);
/* fwdns_prio100 prog does not read delivery_time_type, so
* kernel puts the (rcv) timestamp in __sk_buff->tstamp
@@ -715,13 +715,13 @@ static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd)
errs = skel->bss->errs[t];
skel->bss->test = t;
- test_inet_dtime(family, SOCK_DGRAM, addr, 0);
+ test_inet_dtime(family, SOCK_DGRAM, addr, 50000 + t);
ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0,
dtime_cnt_str(t, INGRESS_FWDNS_P100));
/* non mono delivery time is not forwarded */
ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0,
- dtime_cnt_str(t, INGRESS_FWDNS_P100));
+ dtime_cnt_str(t, INGRESS_FWDNS_P101));
for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++)
ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i));
@@ -949,7 +949,6 @@ fail:
return -1;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
enum {
SRC_TO_TARGET = 0,
TARGET_TO_SRC = 1,
diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
index 509e21d5cb9d..b90ee47d3111 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c
@@ -81,6 +81,7 @@ void test_test_global_funcs(void)
{ "test_global_func14.o", "reference type('FWD S') size cannot be determined" },
{ "test_global_func15.o", "At program exit the register R0 has value" },
{ "test_global_func16.o", "invalid indirect read from stack" },
+ { "test_global_func17.o", "Caller passes invalid args into func#1" },
};
libbpf_print_fn_t old_print_fn = NULL;
int err, i, duration = 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
index b57a3009465f..7ddd6615b7e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
@@ -44,16 +44,12 @@ static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name,
static void test_strncmp_ret(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err, got;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
bpf_program__set_autoload(skel->progs.do_strncmp, true);
err = strncmp_test__load(skel);
@@ -91,18 +87,13 @@ out:
static void test_strncmp_bad_not_const_str_size(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size");
@@ -113,18 +104,13 @@ static void test_strncmp_bad_not_const_str_size(void)
static void test_strncmp_bad_writable_target(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_writable_target");
@@ -135,18 +121,13 @@ static void test_strncmp_bad_writable_target(void)
static void test_strncmp_bad_not_null_term_target(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target");
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
new file mode 100644
index 000000000000..eea274110267
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+/*
+ * End-to-end eBPF tunnel test suite
+ * This file tests the BPF network tunnel implementation.
+ *
+ * Topology:
+ * ---------
+ * root namespace | at_ns0 namespace
+ * |
+ * ----------- | -----------
+ * | tnl dev | | | tnl dev | (overlay network)
+ * ----------- | -----------
+ * metadata-mode | metadata-mode
+ * with bpf | with bpf
+ * |
+ * ---------- | ----------
+ * | veth1 | --------- | veth0 | (underlay network)
+ * ---------- peer ----------
+ *
+ *
+ * Device Configuration
+ * --------------------
+ * root namespace with metadata-mode tunnel + BPF
+ * Device names and addresses:
+ * veth1 IP 1: 172.16.1.200, IPv6: 00::22 (underlay)
+ * IP 2: 172.16.1.20, IPv6: 00::bb (underlay)
+ * tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay)
+ *
+ * Namespace at_ns0 with native tunnel
+ * Device names and addresses:
+ * veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay)
+ * tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay)
+ *
+ *
+ * End-to-end ping packet flow
+ * ---------------------------
+ * Most of the tests start with namespace creation and device configuration,
+ * then ping the underlay and overlay network. When doing 'ping 10.1.1.100'
+ * from the root namespace, the following operations happen:
+ * 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev.
+ * 2) The tnl device's egress BPF program is triggered and sets the tunnel
+ * metadata, with local_ip=172.16.1.200, remote_ip=172.16.1.100 (see the
+ * sketch just below this comment). The BPF program chooses the primary
+ * or secondary IP of veth1 as the local IP of the tunnel, based on the
+ * value of the bpf map local_ip_map.
+ * 3) The outer tunnel header is prepended and the packet is routed to
+ * veth1's egress.
+ * 4) veth0's ingress queue receives the tunneled packet at namespace at_ns0.
+ * 5) The tunnel protocol handler, e.g. vxlan_rcv, decaps the packet.
+ * 6) The packet is forwarded to the overlay tnl dev.
+
+#include <arpa/inet.h>
+#include <linux/if_tun.h>
+#include <linux/limits.h>
+#include <linux/sysctl.h>
+#include <linux/time_types.h>
+#include <linux/net_tstamp.h>
+#include <net/if.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "test_tunnel_kern.skel.h"
+
+#define IP4_ADDR_VETH0 "172.16.1.100"
+#define IP4_ADDR1_VETH1 "172.16.1.200"
+#define IP4_ADDR2_VETH1 "172.16.1.20"
+#define IP4_ADDR_TUNL_DEV0 "10.1.1.100"
+#define IP4_ADDR_TUNL_DEV1 "10.1.1.200"
+
+#define IP6_ADDR_VETH0 "::11"
+#define IP6_ADDR1_VETH1 "::22"
+#define IP6_ADDR2_VETH1 "::bb"
+
+#define IP4_ADDR1_HEX_VETH1 0xac1001c8
+#define IP4_ADDR2_HEX_VETH1 0xac100114
+#define IP6_ADDR1_HEX_VETH1 0x22
+#define IP6_ADDR2_HEX_VETH1 0xbb
+
+#define MAC_TUNL_DEV0 "52:54:00:d9:01:00"
+#define MAC_TUNL_DEV1 "52:54:00:d9:02:00"
+#define MAC_VETH1 "52:54:00:d9:03:00"
+
+#define VXLAN_TUNL_DEV0 "vxlan00"
+#define VXLAN_TUNL_DEV1 "vxlan11"
+#define IP6VXLAN_TUNL_DEV0 "ip6vxlan00"
+#define IP6VXLAN_TUNL_DEV1 "ip6vxlan11"
+
+#define PING_ARGS "-i 0.01 -c 3 -w 10 -q"
+
+#define SYS(fmt, ...) \
+ ({ \
+ char cmd[1024]; \
+ snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ if (!ASSERT_OK(system(cmd), cmd)) \
+ goto fail; \
+ })
+
+#define SYS_NOFAIL(fmt, ...) \
+ ({ \
+ char cmd[1024]; \
+ snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+ system(cmd); \
+ })
+
+static int config_device(void)
+{
+ SYS("ip netns add at_ns0");
+ SYS("ip link add veth0 address " MAC_VETH1 " type veth peer name veth1");
+ SYS("ip link set veth0 netns at_ns0");
+ SYS("ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1");
+ SYS("ip link set dev veth1 up mtu 1500");
+ SYS("ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0");
+ SYS("ip netns exec at_ns0 ip link set dev veth0 up mtu 1500");
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void cleanup(void)
+{
+ SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0");
+ SYS_NOFAIL("ip link del veth1 2> /dev/null");
+ SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1);
+ SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1);
+}
+
+static int add_vxlan_tunnel(void)
+{
+ /* at_ns0 namespace */
+ SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789",
+ VXLAN_TUNL_DEV0);
+ SYS("ip netns exec at_ns0 ip link set dev %s address %s up",
+ VXLAN_TUNL_DEV0, MAC_TUNL_DEV0);
+ SYS("ip netns exec at_ns0 ip addr add dev %s %s/24",
+ VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
+ SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s",
+ IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0);
+ SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0",
+ IP4_ADDR2_VETH1, MAC_VETH1);
+
+ /* root namespace */
+ SYS("ip link add dev %s type vxlan external gbp dstport 4789",
+ VXLAN_TUNL_DEV1);
+ SYS("ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1);
+ SYS("ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1);
+ SYS("ip neigh add %s lladdr %s dev %s",
+ IP4_ADDR_TUNL_DEV0, MAC_TUNL_DEV0, VXLAN_TUNL_DEV1);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void delete_vxlan_tunnel(void)
+{
+ SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s",
+ VXLAN_TUNL_DEV0);
+ SYS_NOFAIL("ip link delete dev %s", VXLAN_TUNL_DEV1);
+}
+
+static int add_ip6vxlan_tunnel(void)
+{
+ SYS("ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0",
+ IP6_ADDR_VETH0);
+ SYS("ip netns exec at_ns0 ip link set dev veth0 up");
+ SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1);
+ SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1);
+ SYS("ip link set dev veth1 up");
+
+ /* at_ns0 namespace */
+ SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789",
+ IP6VXLAN_TUNL_DEV0);
+ SYS("ip netns exec at_ns0 ip addr add dev %s %s/24",
+ IP6VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0);
+ SYS("ip netns exec at_ns0 ip link set dev %s address %s up",
+ IP6VXLAN_TUNL_DEV0, MAC_TUNL_DEV0);
+
+ /* root namespace */
+ SYS("ip link add dev %s type vxlan external dstport 4789",
+ IP6VXLAN_TUNL_DEV1);
+ SYS("ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1);
+ SYS("ip link set dev %s address %s up",
+ IP6VXLAN_TUNL_DEV1, MAC_TUNL_DEV1);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static void delete_ip6vxlan_tunnel(void)
+{
+ SYS_NOFAIL("ip netns exec at_ns0 ip -6 addr delete %s/96 dev veth0",
+ IP6_ADDR_VETH0);
+ SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR1_VETH1);
+ SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR2_VETH1);
+ SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s",
+ IP6VXLAN_TUNL_DEV0);
+ SYS_NOFAIL("ip link delete dev %s", IP6VXLAN_TUNL_DEV1);
+}
+
+static int test_ping(int family, const char *addr)
+{
+ SYS("%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr);
+ return 0;
+fail:
+ return -1;
+}
+
+static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd)
+{
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1,
+ .priority = 1, .prog_fd = igr_fd);
+ DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1,
+ .priority = 1, .prog_fd = egr_fd);
+ int ret;
+
+ ret = bpf_tc_hook_create(hook);
+ if (!ASSERT_OK(ret, "create tc hook"))
+ return ret;
+
+ if (igr_fd >= 0) {
+ hook->attach_point = BPF_TC_INGRESS;
+ ret = bpf_tc_attach(hook, &opts1);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(hook);
+ return ret;
+ }
+ }
+
+ if (egr_fd >= 0) {
+ hook->attach_point = BPF_TC_EGRESS;
+ ret = bpf_tc_attach(hook, &opts2);
+ if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+ bpf_tc_hook_destroy(hook);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void test_vxlan_tunnel(void)
+{
+ struct test_tunnel_kern *skel = NULL;
+ struct nstoken *nstoken;
+ int local_ip_map_fd = -1;
+ int set_src_prog_fd, get_src_prog_fd;
+ int set_dst_prog_fd;
+ int key = 0, ifindex = -1;
+ uint local_ip;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+ .attach_point = BPF_TC_INGRESS);
+
+ /* add vxlan tunnel */
+ err = add_vxlan_tunnel();
+ if (!ASSERT_OK(err, "add vxlan tunnel"))
+ goto done;
+
+ /* load and attach bpf prog to tunnel dev tc hook point */
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ goto done;
+ ifindex = if_nametoindex(VXLAN_TUNL_DEV1);
+ if (!ASSERT_NEQ(ifindex, 0, "vxlan11 ifindex"))
+ goto done;
+ tc_hook.ifindex = ifindex;
+ get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src);
+ set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src);
+ if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd))
+ goto done;
+
+ /* load and attach bpf prog to veth dev tc hook point */
+ ifindex = if_nametoindex("veth1");
+ if (!ASSERT_NEQ(ifindex, 0, "veth1 ifindex"))
+ goto done;
+ tc_hook.ifindex = ifindex;
+ set_dst_prog_fd = bpf_program__fd(skel->progs.veth_set_outer_dst);
+ if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (attach_tc_prog(&tc_hook, set_dst_prog_fd, -1))
+ goto done;
+
+ /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
+ nstoken = open_netns("at_ns0");
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+ ifindex = if_nametoindex(VXLAN_TUNL_DEV0);
+ if (!ASSERT_NEQ(ifindex, 0, "vxlan00 ifindex"))
+ goto done;
+ tc_hook.ifindex = ifindex;
+ set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst);
+ if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd))
+ goto done;
+ close_netns(nstoken);
+
+ /* use veth1 ip 2 as tunnel source ip */
+ local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map);
+ if (!ASSERT_GE(local_ip_map_fd, 0, "bpf_map__fd"))
+ goto done;
+ local_ip = IP4_ADDR2_HEX_VETH1;
+ err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY);
+ if (!ASSERT_OK(err, "update bpf local_ip_map"))
+ goto done;
+
+ /* ping test */
+ err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0);
+ if (!ASSERT_OK(err, "test_ping"))
+ goto done;
+
+done:
+ /* delete vxlan tunnel */
+ delete_vxlan_tunnel();
+ if (local_ip_map_fd >= 0)
+ close(local_ip_map_fd);
+ if (skel)
+ test_tunnel_kern__destroy(skel);
+}
+
+static void test_ip6vxlan_tunnel(void)
+{
+ struct test_tunnel_kern *skel = NULL;
+ struct nstoken *nstoken;
+ int local_ip_map_fd = -1;
+ int set_src_prog_fd, get_src_prog_fd;
+ int set_dst_prog_fd;
+ int key = 0, ifindex = -1;
+ uint local_ip;
+ int err;
+ DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+ .attach_point = BPF_TC_INGRESS);
+
+	/* add ip6vxlan tunnel */
+ err = add_ip6vxlan_tunnel();
+ if (!ASSERT_OK(err, "add_ip6vxlan_tunnel"))
+ goto done;
+
+ /* load and attach bpf prog to tunnel dev tc hook point */
+ skel = test_tunnel_kern__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load"))
+ goto done;
+ ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV1);
+ if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan11 ifindex"))
+ goto done;
+ tc_hook.ifindex = ifindex;
+ get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src);
+ set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src);
+ if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd))
+ goto done;
+
+ /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */
+ nstoken = open_netns("at_ns0");
+ if (!ASSERT_OK_PTR(nstoken, "setns src"))
+ goto done;
+ ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV0);
+ if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan00 ifindex"))
+ goto done;
+ tc_hook.ifindex = ifindex;
+ set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst);
+ if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd"))
+ goto done;
+ if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd))
+ goto done;
+ close_netns(nstoken);
+
+	/* use veth1 ipv6 address 2 as tunnel source ip */
+ local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map);
+ if (!ASSERT_GE(local_ip_map_fd, 0, "get local_ip_map fd"))
+ goto done;
+ local_ip = IP6_ADDR2_HEX_VETH1;
+ err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY);
+ if (!ASSERT_OK(err, "update bpf local_ip_map"))
+ goto done;
+
+ /* ping test */
+ err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0);
+ if (!ASSERT_OK(err, "test_ping"))
+ goto done;
+
+done:
+ /* delete ipv6 vxlan tunnel */
+ delete_ip6vxlan_tunnel();
+ if (local_ip_map_fd >= 0)
+ close(local_ip_map_fd);
+ if (skel)
+ test_tunnel_kern__destroy(skel);
+}
+
+#define RUN_TEST(name) \
+ ({ \
+ if (test__start_subtest(#name)) { \
+ test_ ## name(); \
+ } \
+ })
+
+static void *test_tunnel_run_tests(void *arg)
+{
+ cleanup();
+ config_device();
+
+ RUN_TEST(vxlan_tunnel);
+ RUN_TEST(ip6vxlan_tunnel);
+
+ cleanup();
+
+ return NULL;
+}
+
+void serial_test_tunnel(void)
+{
+ pthread_t test_thread;
+ int err;
+
+	/* Run the tests in their own thread to isolate the namespace changes
+	 * so they do not affect the environment of other tests; this is
+	 * specifically needed because open_netns() calls unshare(CLONE_NEWNS).
+	 */
+ err = pthread_create(&test_thread, NULL, &test_tunnel_run_tests, NULL);
+ if (ASSERT_OK(err, "pthread_create"))
+ ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
index 2ee5f5ae11d4..9ff7843909e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c
+++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c
@@ -35,7 +35,7 @@ static int timer_mim(struct timer_mim *timer_skel)
ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok");
close(bpf_map__fd(timer_skel->maps.inner_htab));
- err = bpf_map_delete_elem(bpf_map__fd(timer_skel->maps.outer_arr), &key1);
+ err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0);
ASSERT_EQ(err, 0, "delete inner map");
/* check that timer_cb[12] are no longer running */
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 9c795ee52b7b..b0acbda6dbf5 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -1,126 +1,94 @@
// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE
-#include <sched.h>
-#include <sys/prctl.h>
#include <test_progs.h>
#define MAX_TRAMP_PROGS 38
struct inst {
struct bpf_object *obj;
- struct bpf_link *link_fentry;
- struct bpf_link *link_fexit;
+ struct bpf_link *link;
};
-static int test_task_rename(void)
-{
- int fd, duration = 0, err;
- char buf[] = "test_overhead";
-
- fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
- if (CHECK(fd < 0, "open /proc", "err %d", errno))
- return -1;
- err = write(fd, buf, sizeof(buf));
- if (err < 0) {
- CHECK(err < 0, "task rename", "err %d", errno);
- close(fd);
- return -1;
- }
- close(fd);
- return 0;
-}
-
-static struct bpf_link *load(struct bpf_object *obj, const char *name)
+static struct bpf_program *load_prog(char *file, char *name, struct inst *inst)
{
+ struct bpf_object *obj;
struct bpf_program *prog;
- int duration = 0;
+ int err;
+
+ obj = bpf_object__open_file(file, NULL);
+ if (!ASSERT_OK_PTR(obj, "obj_open_file"))
+ return NULL;
+
+ inst->obj = obj;
+
+ err = bpf_object__load(obj);
+ if (!ASSERT_OK(err, "obj_load"))
+ return NULL;
prog = bpf_object__find_program_by_name(obj, name);
- if (CHECK(!prog, "find_probe", "prog '%s' not found\n", name))
- return ERR_PTR(-EINVAL);
- return bpf_program__attach_trace(prog);
+ if (!ASSERT_OK_PTR(prog, "obj_find_prog"))
+ return NULL;
+
+ return prog;
}
/* TODO: use different target function to run in concurrent mode */
void serial_test_trampoline_count(void)
{
- const char *fentry_name = "prog1";
- const char *fexit_name = "prog2";
- const char *object = "test_trampoline_count.o";
- struct inst inst[MAX_TRAMP_PROGS] = {};
- int err, i = 0, duration = 0;
- struct bpf_object *obj;
+ char *file = "test_trampoline_count.o";
+ char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" };
+ struct inst inst[MAX_TRAMP_PROGS + 1] = {};
+ struct bpf_program *prog;
struct bpf_link *link;
- char comm[16] = {};
+ int prog_fd, err, i;
+ LIBBPF_OPTS(bpf_test_run_opts, opts);
/* attach 'allowed' trampoline programs */
for (i = 0; i < MAX_TRAMP_PROGS; i++) {
- obj = bpf_object__open_file(object, NULL);
- if (!ASSERT_OK_PTR(obj, "obj_open_file")) {
- obj = NULL;
+ prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]);
+ if (!prog)
goto cleanup;
- }
- err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d\n", err))
+ link = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(link, "attach_prog"))
goto cleanup;
- inst[i].obj = obj;
- obj = NULL;
-
- if (rand() % 2) {
- link = load(inst[i].obj, fentry_name);
- if (!ASSERT_OK_PTR(link, "attach_prog")) {
- link = NULL;
- goto cleanup;
- }
- inst[i].link_fentry = link;
- } else {
- link = load(inst[i].obj, fexit_name);
- if (!ASSERT_OK_PTR(link, "attach_prog")) {
- link = NULL;
- goto cleanup;
- }
- inst[i].link_fexit = link;
- }
+
+ inst[i].link = link;
}
/* and try 1 extra.. */
- obj = bpf_object__open_file(object, NULL);
- if (!ASSERT_OK_PTR(obj, "obj_open_file")) {
- obj = NULL;
+ prog = load_prog(file, "fmod_ret_test", &inst[i]);
+ if (!prog)
goto cleanup;
- }
-
- err = bpf_object__load(obj);
- if (CHECK(err, "obj_load", "err %d\n", err))
- goto cleanup_extra;
/* ..that needs to fail */
- link = load(obj, fentry_name);
- err = libbpf_get_error(link);
- if (!ASSERT_ERR_PTR(link, "cannot attach over the limit")) {
- bpf_link__destroy(link);
- goto cleanup_extra;
+ link = bpf_program__attach(prog);
+ if (!ASSERT_ERR_PTR(link, "attach_prog")) {
+ inst[i].link = link;
+ goto cleanup;
}
/* with E2BIG error */
- ASSERT_EQ(err, -E2BIG, "proper error check");
- ASSERT_EQ(link, NULL, "ptr_is_null");
+ if (!ASSERT_EQ(libbpf_get_error(link), -E2BIG, "E2BIG"))
+ goto cleanup;
+ if (!ASSERT_EQ(link, NULL, "ptr_is_null"))
+ goto cleanup;
	/* and finally execute the probe */
- if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L)))
- goto cleanup_extra;
- CHECK_FAIL(test_task_rename());
- CHECK_FAIL(prctl(PR_SET_NAME, comm, 0L, 0L, 0L));
+ prog_fd = bpf_program__fd(prog);
+ if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd"))
+ goto cleanup;
+
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto cleanup;
+
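+	/* opts.retval packs the observed side effect in its upper 16 bits and
+	 * bpf_modify_return_test()'s result in its lower 16 bits
+	 */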
+ ASSERT_EQ(opts.retval & 0xffff, 4, "bpf_modify_return_test.result");
+ ASSERT_EQ(opts.retval >> 16, 1, "bpf_modify_return_test.side_effect");
-cleanup_extra:
- bpf_object__close(obj);
cleanup:
- if (i >= MAX_TRAMP_PROGS)
- i = MAX_TRAMP_PROGS - 1;
for (; i >= 0; i--) {
- bpf_link__destroy(inst[i].link_fentry);
- bpf_link__destroy(inst[i].link_fexit);
+ bpf_link__destroy(inst[i].link);
bpf_object__close(inst[i].obj);
}
}
diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
new file mode 100644
index 000000000000..1ed3cc2092db
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_unpriv_bpf_disabled.skel.h"
+
+#include "cap_helpers.h"
+
+/* Using CAP_LAST_CAP is risky here, since it can get pulled in from
+ * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result
+ * CAP_BPF would not be included in ALL_CAPS. Instead use CAP_BPF as
+ * we know its value is correct since it is explicitly defined in
+ * cap_helpers.h.
+ */
+#define ALL_CAPS ((2ULL << CAP_BPF) - 1)
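+
+/* For example, with CAP_BPF == 39, 2ULL << 39 equals 1ULL << 40, so
+ * ALL_CAPS has bits 0 through 39 set -- every capability up to and
+ * including CAP_BPF.
+ */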
+
+#define PINPATH "/sys/fs/bpf/unpriv_bpf_disabled_"
+#define NUM_MAPS 7
+
+static __u32 got_perfbuf_val;
+static __u32 got_ringbuf_val;
+
+static int process_ringbuf(void *ctx, void *data, size_t len)
+{
+ if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid"))
+ got_ringbuf_val = *(__u32 *)data;
+ return 0;
+}
+
+static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
+{
+ if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid"))
+ got_perfbuf_val = *(__u32 *)data;
+}
+
+static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
+{
+ int ret = 0;
+ FILE *fp;
+
+ fp = fopen(sysctl_path, "r+");
+ if (!fp)
+ return -errno;
+ if (old_val && fscanf(fp, "%s", old_val) <= 0) {
+ ret = -ENOENT;
+ } else if (!old_val || strcmp(old_val, new_val) != 0) {
+ fseek(fp, 0, SEEK_SET);
+ if (fprintf(fp, "%s", new_val) < 0)
+ ret = -errno;
+ }
+ fclose(fp);
+
+ return ret;
+}
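+
+/* Typical usage: sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled",
+ * old_val, "2") saves the current value into old_val and writes "2" only if
+ * it differs, so the original setting can be restored during cleanup.
+ */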
+
+static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel,
+ __u32 prog_id, int prog_fd, int perf_fd,
+ char **map_paths, int *map_fds)
+{
+ struct perf_buffer *perfbuf = NULL;
+ struct ring_buffer *ringbuf = NULL;
+ int i, nr_cpus, link_fd = -1;
+
+ nr_cpus = bpf_num_possible_cpus();
+
+ skel->bss->perfbuf_val = 1;
+ skel->bss->ringbuf_val = 2;
+
+ /* Positive tests for unprivileged BPF disabled. Verify we can
+ * - retrieve and interact with pinned maps;
+ * - set up and interact with perf buffer;
+ * - set up and interact with ring buffer;
+ * - create a link
+ */
+ perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL,
+ NULL);
+ if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new"))
+ goto cleanup;
+
+ ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL);
+ if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
+ goto cleanup;
+
+ /* trigger & validate perf event, ringbuf output */
+ usleep(1);
+
+ ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll");
+ ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val");
+ ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume");
+ ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val");
+
+ for (i = 0; i < NUM_MAPS; i++) {
+ map_fds[i] = bpf_obj_get(map_paths[i]);
+ if (!ASSERT_GT(map_fds[i], -1, "obj_get"))
+ goto cleanup;
+ }
+
+ for (i = 0; i < NUM_MAPS; i++) {
+ bool prog_array = strstr(map_paths[i], "prog_array") != NULL;
+ bool array = strstr(map_paths[i], "array") != NULL;
+ bool buf = strstr(map_paths[i], "buf") != NULL;
+ __u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus];
+ __u32 expected_val = 1;
+ int j;
+
+ /* skip ringbuf, perfbuf */
+ if (buf)
+ continue;
+
+ for (j = 0; j < nr_cpus; j++)
+ vals[j] = expected_val;
+
+ if (prog_array) {
+ /* need valid prog array value */
+ vals[0] = prog_fd;
+ /* prog array lookup returns prog id, not fd */
+ expected_val = prog_id;
+ }
+ ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem");
+ ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem");
+ ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values");
+ if (!array)
+ ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem");
+ }
+
+ link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd,
+ BPF_PERF_EVENT, NULL);
+ ASSERT_GT(link_fd, 0, "link_create");
+
+cleanup:
+	if (link_fd >= 0)
+		close(link_fd);
+ if (perfbuf)
+ perf_buffer__free(perfbuf);
+ if (ringbuf)
+ ring_buffer__free(ringbuf);
+}
+
+static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel,
+ __u32 prog_id, int prog_fd, int perf_fd,
+ char **map_paths, int *map_fds)
+{
+ const struct bpf_insn prog_insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
+ struct bpf_map_info map_info = {};
+ __u32 map_info_len = sizeof(map_info);
+ struct bpf_link_info link_info = {};
+ __u32 link_info_len = sizeof(link_info);
+ struct btf *btf = NULL;
+ __u32 attach_flags = 0;
+ __u32 prog_ids[3] = {};
+ __u32 prog_cnt = 3;
+ __u32 next;
+ int i;
+
+	/* Negative tests for unprivileged BPF disabled. Verify we cannot
+	 * - load BPF programs;
+	 * - create BPF maps;
+	 * - get a prog/map/link fd by id;
+	 * - get the next prog/map/link id;
+	 * - query a prog;
+	 * - load BTF.
+	 */
+ ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL",
+ prog_insns, prog_insn_cnt, &load_opts),
+ -EPERM, "prog_load_fails");
+
+ for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++)
+ ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
+ -EPERM, "map_create_fails");
+
+ ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails");
+ ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails");
+ ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails");
+
+ if (ASSERT_OK(bpf_obj_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
+ "obj_get_info_by_fd")) {
+ ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails");
+ ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM,
+ "map_get_next_id_fails");
+ }
+ ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails");
+
+ if (ASSERT_OK(bpf_obj_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
+ &link_info, &link_info_len),
+ "obj_get_info_by_fd")) {
+ ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails");
+ ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM,
+ "link_get_next_id_fails");
+ }
+ ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails");
+
+ ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids,
+ &prog_cnt), -EPERM, "prog_query_fails");
+
+ btf = btf__new_empty();
+ if (ASSERT_OK_PTR(btf, "empty_btf") &&
+ ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) {
+ const void *raw_btf_data;
+ __u32 raw_btf_size;
+
+ raw_btf_data = btf__raw_data(btf, &raw_btf_size);
+ if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
+ ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM,
+ "bpf_btf_load_fails");
+ }
+ btf__free(btf);
+}
+
+void test_unpriv_bpf_disabled(void)
+{
+ char *map_paths[NUM_MAPS] = { PINPATH "array",
+ PINPATH "percpu_array",
+ PINPATH "hash",
+ PINPATH "percpu_hash",
+ PINPATH "perfbuf",
+ PINPATH "ringbuf",
+ PINPATH "prog_array" };
+ int map_fds[NUM_MAPS];
+ struct test_unpriv_bpf_disabled *skel;
+ char unprivileged_bpf_disabled_orig[32] = {};
+ char perf_event_paranoid_orig[32] = {};
+ struct bpf_prog_info prog_info = {};
+ __u32 prog_info_len = sizeof(prog_info);
+ struct perf_event_attr attr = {};
+ int prog_fd, perf_fd = -1, i, ret;
+ __u64 save_caps = 0;
+ __u32 prog_id;
+
+ skel = test_unpriv_bpf_disabled__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ skel->bss->test_pid = getpid();
+
+ map_fds[0] = bpf_map__fd(skel->maps.array);
+ map_fds[1] = bpf_map__fd(skel->maps.percpu_array);
+ map_fds[2] = bpf_map__fd(skel->maps.hash);
+ map_fds[3] = bpf_map__fd(skel->maps.percpu_hash);
+ map_fds[4] = bpf_map__fd(skel->maps.perfbuf);
+ map_fds[5] = bpf_map__fd(skel->maps.ringbuf);
+ map_fds[6] = bpf_map__fd(skel->maps.prog_array);
+
+ for (i = 0; i < NUM_MAPS; i++)
+ ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd");
+
+ /* allow user without caps to use perf events */
+ if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig,
+ "-1"),
+ "set_perf_event_paranoid"))
+ goto cleanup;
+ /* ensure unprivileged bpf disabled is set */
+ ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled",
+ unprivileged_bpf_disabled_orig, "2");
+ if (ret == -EPERM) {
+ /* if unprivileged_bpf_disabled=1, we get -EPERM back; that's okay. */
+ if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"),
+ "unprivileged_bpf_disabled_on"))
+ goto cleanup;
+ } else {
+ if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled"))
+ goto cleanup;
+ }
+
+ prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter);
+ ASSERT_OK(bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
+ "obj_get_info_by_fd");
+ prog_id = prog_info.id;
+ ASSERT_GT(prog_id, 0, "valid_prog_id");
+
+ attr.size = sizeof(attr);
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ attr.freq = 1;
+ attr.sample_freq = 1000;
+ perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
+ if (!ASSERT_GE(perf_fd, 0, "perf_fd"))
+ goto cleanup;
+
+ if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps"))
+ goto cleanup;
+
+ if (test__start_subtest("unpriv_bpf_disabled_positive"))
+ test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths,
+ map_fds);
+
+ if (test__start_subtest("unpriv_bpf_disabled_negative"))
+ test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths,
+ map_fds);
+
+cleanup:
+	if (perf_fd >= 0)
+		close(perf_fd);
+ if (save_caps)
+ cap_enable_effective(save_caps, NULL);
+ if (strlen(perf_event_paranoid_orig) > 0)
+ sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig);
+ if (strlen(unprivileged_bpf_disabled_orig) > 0)
+ sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL,
+ unprivileged_bpf_disabled_orig);
+ for (i = 0; i < NUM_MAPS; i++)
+ unlink(map_paths[i]);
+ test_unpriv_bpf_disabled__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
new file mode 100644
index 000000000000..35b87c7ba5be
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+#include "test_uprobe_autoattach.skel.h"
+
+/* uprobe attach point */
+static noinline int autoattach_trigger_func(int arg)
+{
+ asm volatile ("");
+ return arg + 1;
+}
+
+void test_uprobe_autoattach(void)
+{
+ struct test_uprobe_autoattach *skel;
+ int trigger_val = 100, trigger_ret;
+ size_t malloc_sz = 1;
+ char *mem;
+
+ skel = test_uprobe_autoattach__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ if (!ASSERT_OK(test_uprobe_autoattach__attach(skel), "skel_attach"))
+ goto cleanup;
+
+ skel->bss->test_pid = getpid();
+
+ /* trigger & validate uprobe & uretprobe */
+ trigger_ret = autoattach_trigger_func(trigger_val);
+
+ skel->bss->test_pid = getpid();
+
+ /* trigger & validate shared library u[ret]probes attached by name */
+ mem = malloc(malloc_sz);
+
+ ASSERT_EQ(skel->bss->uprobe_byname_parm1, trigger_val, "check_uprobe_byname_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran");
+ ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran");
+ ASSERT_EQ(skel->bss->uprobe_byname2_parm1, malloc_sz, "check_uprobe_byname2_parm1");
+ ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc");
+ ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran");
+
+ free(mem);
+cleanup:
+ test_uprobe_autoattach__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c
new file mode 100644
index 000000000000..9ad9da0f215e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/usdt.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#define _SDT_HAS_SEMAPHORES 1
+#include "../sdt.h"
+
+#include "test_usdt.skel.h"
+#include "test_urandom_usdt.skel.h"
+
+int lets_test_this(int);
+
+static volatile int idx = 2;
+static volatile __u64 bla = 0xFEDCBA9876543210ULL;
+static volatile short nums[] = {-1, -2, -3, -4};
+
+static volatile struct {
+ int x;
+ signed char y;
+} t1 = { 1, -127 };
+
+#define SEC(name) __attribute__((section(name), used))
+
+unsigned short test_usdt0_semaphore SEC(".probes");
+unsigned short test_usdt3_semaphore SEC(".probes");
+unsigned short test_usdt12_semaphore SEC(".probes");
+
+static void __always_inline trigger_func(int x) {
+ long y = 42;
+
+ if (test_usdt0_semaphore)
+ STAP_PROBE(test, usdt0);
+ if (test_usdt3_semaphore)
+ STAP_PROBE3(test, usdt3, x, y, &bla);
+ if (test_usdt12_semaphore) {
+ STAP_PROBE12(test, usdt12,
+ x, x + 1, y, x + y, 5,
+ y / 7, bla, &bla, -9, nums[x],
+ nums[idx], t1.y);
+ }
+}
+
+static void subtest_basic_usdt(void)
+{
+ LIBBPF_OPTS(bpf_usdt_opts, opts);
+ struct test_usdt *skel;
+ struct test_usdt__bss *bss;
+ int err;
+
+ skel = test_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ err = test_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* usdt0 won't be auto-attached */
+ opts.usdt_cookie = 0xcafedeadbeeffeed;
+ skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0,
+ 0 /*self*/, "/proc/self/exe",
+ "test", "usdt0", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link"))
+ goto cleanup;
+
+ trigger_func(1);
+
+ ASSERT_EQ(bss->usdt0_called, 1, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, 1, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, 1, "usdt12_called");
+
+ ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie");
+ ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt");
+ ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret");
+
+ /* auto-attached usdt3 gets default zero cookie value */
+ ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie");
+ ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
+
+ ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
+ ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1");
+ ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
+ ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+
+ /* auto-attached usdt12 gets default zero cookie value */
+ ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie");
+ ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt");
+
+ ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1");
+ ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2");
+ ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3");
+ ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4");
+ ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5");
+ ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6");
+ ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7");
+ ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8");
+ ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9");
+ ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10");
+ ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11");
+ ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12");
+
+ /* trigger_func() is marked __always_inline, so USDT invocations will be
+ * inlined in two different places, meaning that each USDT will have
+ * at least 2 different places to be attached to. This verifies that
+ * bpf_program__attach_usdt() handles this properly and attaches to
+ * all possible places of USDT invocation.
+ */
+ trigger_func(2);
+
+ ASSERT_EQ(bss->usdt0_called, 2, "usdt0_called");
+ ASSERT_EQ(bss->usdt3_called, 2, "usdt3_called");
+ ASSERT_EQ(bss->usdt12_called, 2, "usdt12_called");
+
+ /* only check values that depend on trigger_func()'s input value */
+ ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1");
+
+ ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1");
+ ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2");
+ ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4");
+ ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10");
+
+ /* detach and re-attach usdt3 */
+ bpf_link__destroy(skel->links.usdt3);
+
+ opts.usdt_cookie = 0xBADC00C51E;
+ skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */,
+ "/proc/self/exe", "test", "usdt3", &opts);
+ if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach"))
+ goto cleanup;
+
+ trigger_func(3);
+
+ ASSERT_EQ(bss->usdt3_called, 3, "usdt3_called");
+ /* this time usdt3 has custom cookie */
+ ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie");
+ ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt");
+
+ ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret");
+ ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret");
+ ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1");
+ ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2");
+ ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3");
+
+cleanup:
+ test_usdt__destroy(skel);
+}
+
+unsigned short test_usdt_100_semaphore SEC(".probes");
+unsigned short test_usdt_300_semaphore SEC(".probes");
+unsigned short test_usdt_400_semaphore SEC(".probes");
+
+#define R10(F, X) F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \
+ F(X+5); F(X+6); F(X+7); F(X+8); F(X+9);
+#define R100(F, X) R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \
+ R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90);
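+
+/* e.g., R100(f100, 0) textually expands to the 100 calls
+ * f100(0); f100(1); ... f100(99);, each of which inlines its own
+ * STAP_PROBE1() call site
+ */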
+
+/* carefully control that we get exactly 100 inlined call sites: f100() is
+ * always inlined, while the __weak trigger functions below cannot themselves
+ * be inlined or optimized away
+ */
+static void __always_inline f100(int x)
+{
+ STAP_PROBE1(test, usdt_100, x);
+}
+
+__weak void trigger_100_usdts(void)
+{
+ R100(f100, 0);
+}
+
+/* we shouldn't be able to attach to the test:usdt_300 USDT as we don't have
+ * that many slots for specs. It's important that each STAP_PROBE1()
+ * invocation (after macro unrolling) gets a different arg spec due to the
+ * compiler inlining x as a constant
+ */
+static void __always_inline f300(int x)
+{
+ STAP_PROBE1(test, usdt_300, x);
+}
+
+__weak void trigger_300_usdts(void)
+{
+ R100(f300, 0);
+ R100(f300, 100);
+ R100(f300, 200);
+}
+
+static void __always_inline f400(int x __attribute__((unused)))
+{
+ STAP_PROBE1(test, usdt_400, 400);
+}
+
+/* this time we have 400 different USDT call sites, but they have a uniform
+ * argument location, so libbpf's spec string deduplication logic should keep
+ * the spec count very small and thus we should be able to attach to all 400
+ * call sites
+ */
+__weak void trigger_400_usdts(void)
+{
+ R100(f400, 0);
+ R100(f400, 100);
+ R100(f400, 200);
+ R100(f400, 300);
+}
+
+static void subtest_multispec_usdt(void)
+{
+ LIBBPF_OPTS(bpf_usdt_opts, opts);
+ struct test_usdt *skel;
+ struct test_usdt__bss *bss;
+ int err, i;
+
+ skel = test_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ err = test_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_attach"))
+ goto cleanup;
+
+ /* usdt_100 is auto-attached and there are 100 inlined call sites,
+ * let's validate that all of them are properly attached to and
+ * handled from BPF side
+ */
+ trigger_100_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called");
+ ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+
+ /* Stress test free spec ID tracking. By default libbpf allows up to
+ * 256 specs to be used, so if we don't return free spec IDs back
+ * after few detachments and re-attachments we should run out of
+ * available spec IDs.
+ */
+ for (i = 0; i < 2; i++) {
+ bpf_link__destroy(skel->links.usdt_100);
+
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
+ "/proc/self/exe",
+ "test", "usdt_100", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach"))
+ goto cleanup;
+
+ bss->usdt_100_sum = 0;
+ trigger_100_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called");
+ ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum");
+ }
+
+ /* Now let's step it up and try to attach USDT that requires more than
+ * 256 attach points with different specs for each.
+ * Note that we need trigger_300_usdts() only to actually have 300
+ * USDT call sites, we are not going to actually trace them.
+ */
+ trigger_300_usdts();
+
+ /* we'll reuse usdt_100 BPF program for usdt_300 test */
+ bpf_link__destroy(skel->links.usdt_100);
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe",
+ "test", "usdt_300", NULL);
+ err = -errno;
+ if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach"))
+ goto cleanup;
+ ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err");
+
+ /* let's check that there are no "dangling" BPF programs attached due
+ * to partial success of the above test:usdt_300 attachment
+ */
+ bss->usdt_100_called = 0;
+ bss->usdt_100_sum = 0;
+
+ f300(777); /* this is 301st instance of usdt_300 */
+
+ ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called");
+ ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum");
+
+ /* This time we have USDT with 400 inlined invocations, but arg specs
+ * should be the same across all sites, so libbpf will only need to
+ * use one spec and thus we'll be able to attach 400 uprobes
+ * successfully.
+ *
+ * Again, we are reusing usdt_100 BPF program.
+ */
+ skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1,
+ "/proc/self/exe",
+ "test", "usdt_400", NULL);
+ if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach"))
+ goto cleanup;
+
+ trigger_400_usdts();
+
+ ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called");
+ ASSERT_EQ(bss->usdt_100_sum, 400 * 400, "usdt_400_sum");
+
+cleanup:
+ test_usdt__destroy(skel);
+}
+
+static FILE *urand_spawn(int *pid)
+{
+ FILE *f;
+
+ /* urandom_read's stdout is wired into f */
+ f = popen("./urandom_read 1 report-pid", "r");
+ if (!f)
+ return NULL;
+
+ if (fscanf(f, "%d", pid) != 1) {
+ pclose(f);
+ return NULL;
+ }
+
+ return f;
+}
+
+static int urand_trigger(FILE **urand_pipe)
+{
+ int exit_code;
+
+	/* pclose() waits for the child process to exit and returns its exit code */
+ exit_code = pclose(*urand_pipe);
+ *urand_pipe = NULL;
+
+ return exit_code;
+}
+
+static void subtest_urandom_usdt(bool auto_attach)
+{
+ struct test_urandom_usdt *skel;
+ struct test_urandom_usdt__bss *bss;
+ struct bpf_link *l;
+ FILE *urand_pipe = NULL;
+ int err, urand_pid = 0;
+
+ skel = test_urandom_usdt__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ urand_pipe = urand_spawn(&urand_pid);
+ if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn"))
+ goto cleanup;
+
+ bss = skel->bss;
+ bss->urand_pid = urand_pid;
+
+ if (auto_attach) {
+ err = test_urandom_usdt__attach(skel);
+ if (!ASSERT_OK(err, "skel_auto_attach"))
+ goto cleanup;
+ } else {
+ l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema,
+ urand_pid, "./urandom_read",
+ "urand", "read_without_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urand_without_sema_attach"))
+ goto cleanup;
+ skel->links.urand_read_without_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema,
+ urand_pid, "./urandom_read",
+ "urand", "read_with_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urand_with_sema_attach"))
+ goto cleanup;
+ skel->links.urand_read_with_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema,
+ urand_pid, "./liburandom_read.so",
+ "urandlib", "read_without_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach"))
+ goto cleanup;
+ skel->links.urandlib_read_without_sema = l;
+
+ l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema,
+ urand_pid, "./liburandom_read.so",
+ "urandlib", "read_with_sema", NULL);
+ if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach"))
+ goto cleanup;
+ skel->links.urandlib_read_with_sema = l;
+
+ }
+
+ /* trigger urandom_read USDTs */
+ ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code");
+
+ ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt");
+ ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum");
+
+ ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt");
+ ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum");
+
+ ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt");
+ ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum");
+
+ ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt");
+ ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum");
+
+cleanup:
+ if (urand_pipe)
+ pclose(urand_pipe);
+ test_urandom_usdt__destroy(skel);
+}
+
+void test_usdt(void)
+{
+ if (test__start_subtest("basic"))
+ subtest_basic_usdt();
+ if (test__start_subtest("multispec"))
+ subtest_multispec_usdt();
+ if (test__start_subtest("urand_auto_attach"))
+ subtest_urandom_usdt(true /* auto_attach */);
+ if (test__start_subtest("urand_pid_attach"))
+ subtest_urandom_usdt(false /* auto_attach */);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
new file mode 100644
index 000000000000..874a846e298c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <ctype.h>
+
+#define CMD_OUT_BUF_SIZE 1023
+
+#define SYS(cmd) ({ \
+ if (!ASSERT_OK(system(cmd), (cmd))) \
+ goto out; \
+})
+
+#define SYS_OUT(cmd, ...) ({ \
+ char buf[1024]; \
+ snprintf(buf, sizeof(buf), (cmd), ##__VA_ARGS__); \
+ FILE *f = popen(buf, "r"); \
+ if (!ASSERT_OK_PTR(f, buf)) \
+ goto out; \
+ f; \
+})
+
+/* out must be at least `size * 4 + 1` bytes long */
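+/* e.g. escape_str(out, "a\nb", 3) produces the bytes "a\x0Ab" plus a NUL */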
+static void escape_str(char *out, const char *in, size_t size)
+{
+ static const char *hex = "0123456789ABCDEF";
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (isprint(in[i]) && in[i] != '\\' && in[i] != '\'') {
+ *out++ = in[i];
+ } else {
+ *out++ = '\\';
+ *out++ = 'x';
+ *out++ = hex[(in[i] >> 4) & 0xf];
+ *out++ = hex[in[i] & 0xf];
+ }
+ }
+ *out++ = '\0';
+}
+
+static bool expect_str(char *buf, size_t size, const char *str, const char *name)
+{
+	static char escbuf_expected[CMD_OUT_BUF_SIZE * 4 + 1];
+	static char escbuf_actual[CMD_OUT_BUF_SIZE * 4 + 1];
+ static int duration = 0;
+ bool ok;
+
+ ok = size == strlen(str) && !memcmp(buf, str, size);
+
+ if (!ok) {
+ escape_str(escbuf_expected, str, strlen(str));
+ escape_str(escbuf_actual, buf, size);
+ }
+ CHECK(!ok, name, "unexpected %s: actual '%s' != expected '%s'\n",
+ name, escbuf_actual, escbuf_expected);
+
+ return ok;
+}
+
+static void test_synproxy(bool xdp)
+{
+ int server_fd = -1, client_fd = -1, accept_fd = -1;
+ char *prog_id = NULL, *prog_id_end;
+ struct nstoken *ns = NULL;
+ FILE *ctrl_file = NULL;
+ char buf[CMD_OUT_BUF_SIZE];
+ size_t size;
+
+ SYS("ip netns add synproxy");
+
+ SYS("ip link add tmp0 type veth peer name tmp1");
+ SYS("ip link set tmp1 netns synproxy");
+ SYS("ip link set tmp0 up");
+ SYS("ip addr replace 198.18.0.1/24 dev tmp0");
+
+ /* When checksum offload is enabled, the XDP program sees wrong
+ * checksums and drops packets.
+ */
+ SYS("ethtool -K tmp0 tx off");
+ if (xdp)
+ /* Workaround required for veth. */
+ SYS("ip link set tmp0 xdp object xdp_dummy.o section xdp 2> /dev/null");
+
+ ns = open_netns("synproxy");
+ if (!ASSERT_OK_PTR(ns, "setns"))
+ goto out;
+
+ SYS("ip link set lo up");
+ SYS("ip link set tmp1 up");
+ SYS("ip addr replace 198.18.0.2/24 dev tmp1");
+ SYS("sysctl -w net.ipv4.tcp_syncookies=2");
+ SYS("sysctl -w net.ipv4.tcp_timestamps=1");
+ SYS("sysctl -w net.netfilter.nf_conntrack_tcp_loose=0");
+ SYS("iptables -t raw -I PREROUTING \
+ -i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack");
+ SYS("iptables -t filter -A INPUT \
+ -i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \
+ -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460");
+ SYS("iptables -t filter -A INPUT \
+ -i tmp1 -m state --state INVALID -j DROP");
+
+ ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \
+ --single --mss4 1460 --mss6 1440 \
+ --wscale 7 --ttl 64%s", xdp ? "" : " --tc");
+ size = fread(buf, 1, sizeof(buf), ctrl_file);
+ pclose(ctrl_file);
+ if (!expect_str(buf, size, "Total SYNACKs generated: 0\n",
+ "initial SYNACKs"))
+ goto out;
+
+ if (!xdp) {
+ ctrl_file = SYS_OUT("tc filter show dev tmp1 ingress");
+ size = fread(buf, 1, sizeof(buf), ctrl_file);
+ pclose(ctrl_file);
+ prog_id = memmem(buf, size, " id ", 4);
+ if (!ASSERT_OK_PTR(prog_id, "find prog id"))
+ goto out;
+ prog_id += 4;
+ if (!ASSERT_LT(prog_id, buf + size, "find prog id begin"))
+ goto out;
+ prog_id_end = prog_id;
+ while (prog_id_end < buf + size && *prog_id_end >= '0' &&
+ *prog_id_end <= '9')
+ prog_id_end++;
+ if (!ASSERT_LT(prog_id_end, buf + size, "find prog id end"))
+ goto out;
+ *prog_id_end = '\0';
+ }
+
+ server_fd = start_server(AF_INET, SOCK_STREAM, "198.18.0.2", 8080, 0);
+ if (!ASSERT_GE(server_fd, 0, "start_server"))
+ goto out;
+
+ close_netns(ns);
+ ns = NULL;
+
+ client_fd = connect_to_fd(server_fd, 10000);
+ if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
+ goto out;
+
+ accept_fd = accept(server_fd, NULL, NULL);
+ if (!ASSERT_GE(accept_fd, 0, "accept"))
+ goto out;
+
+ ns = open_netns("synproxy");
+ if (!ASSERT_OK_PTR(ns, "setns"))
+ goto out;
+
+ if (xdp)
+ ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --single");
+ else
+ ctrl_file = SYS_OUT("./xdp_synproxy --prog %s --single",
+ prog_id);
+ size = fread(buf, 1, sizeof(buf), ctrl_file);
+ pclose(ctrl_file);
+ if (!expect_str(buf, size, "Total SYNACKs generated: 1\n",
+ "SYNACKs after connection"))
+ goto out;
+
+out:
+ if (accept_fd >= 0)
+ close(accept_fd);
+ if (client_fd >= 0)
+ close(client_fd);
+ if (server_fd >= 0)
+ close(server_fd);
+ if (ns)
+ close_netns(ns);
+
+ system("ip link del tmp0");
+ system("ip netns del synproxy");
+}
+
+void test_xdp_synproxy(void)
+{
+ if (test__start_subtest("xdp"))
+ test_synproxy(true);
+ if (test__start_subtest("tc"))
+ test_synproxy(false);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c b/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
new file mode 100644
index 000000000000..56957557e3e1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+#define MAX_ENTRIES 1000
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_ENTRIES);
+} hash_map_bench SEC(".maps");
+
+u64 __attribute__((__aligned__(256))) percpu_time[256];
+u64 nr_loops;
+
+static int loop_update_callback(__u32 index, u32 *key)
+{
+ u64 init_val = 1;
+
+ bpf_map_update_elem(&hash_map_bench, key, &init_val, BPF_ANY);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int benchmark(void *ctx)
+{
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 key = cpu + MAX_ENTRIES;
+ u64 start_time = bpf_ktime_get_ns();
+
+ bpf_loop(nr_loops, loop_update_callback, &key, 0);
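+	/* mask with 255 so CPU ids >= 256 cannot index past percpu_time[] */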
+ percpu_time[cpu & 255] = bpf_ktime_get_ns() - start_time;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
index 8cfaeba1ddbf..e9846606690d 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter.h
@@ -16,11 +16,13 @@
#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used
#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used
#define bpf_iter__sockmap bpf_iter__sockmap___not_used
+#define bpf_iter__bpf_link bpf_iter__bpf_link___not_used
#define btf_ptr btf_ptr___not_used
#define BTF_F_COMPACT BTF_F_COMPACT___not_used
#define BTF_F_NONAME BTF_F_NONAME___not_used
#define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
#define BTF_F_ZERO BTF_F_ZERO___not_used
+#define bpf_iter__ksym bpf_iter__ksym___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__bpf_map
@@ -37,11 +39,13 @@
#undef bpf_iter__bpf_map_elem
#undef bpf_iter__bpf_sk_storage_map
#undef bpf_iter__sockmap
+#undef bpf_iter__bpf_link
#undef btf_ptr
#undef BTF_F_COMPACT
#undef BTF_F_NONAME
#undef BTF_F_PTR_RAW
#undef BTF_F_ZERO
+#undef bpf_iter__ksym
struct bpf_iter_meta {
struct seq_file *seq;
@@ -132,6 +136,11 @@ struct bpf_iter__sockmap {
struct sock *sk;
};
+struct bpf_iter__bpf_link {
+ struct bpf_iter_meta *meta;
+ struct bpf_link *link;
+};
+
struct btf_ptr {
void *ptr;
__u32 type_id;
@@ -144,3 +153,8 @@ enum {
BTF_F_PTR_RAW = (1ULL << 2),
BTF_F_ZERO = (1ULL << 3),
};
+
+struct bpf_iter__ksym {
+ struct bpf_iter_meta *meta;
+ struct kallsym_iter *ksym;
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
new file mode 100644
index 000000000000..e1af2f8f75a6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Red Hat, Inc. */
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("iter/bpf_link")
+int dump_bpf_link(struct bpf_iter__bpf_link *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct bpf_link *link = ctx->link;
+ int link_id;
+
+ if (!link)
+ return 0;
+
+ link_id = link->id;
+ bpf_seq_write(seq, &link_id, sizeof(link_id));
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
new file mode 100644
index 000000000000..285c008cbf9c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+unsigned long last_sym_value = 0;
+
+static inline char tolower(char c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c += ('a' - 'A');
+ return c;
+}
+
+static inline char toupper(char c)
+{
+ if (c >= 'a' && c <= 'z')
+ c -= ('a' - 'A');
+ return c;
+}
+
+/* Dump symbols along with their max size; the latter is calculated by caching
+ * symbol N's value so that, when iterating on symbol N+1, we can print the
+ * max size of symbol N as address of N+1 - address of N.
+ */
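+
+/* For instance, if symbol N is at 0xffff800010000000 and symbol N+1 at
+ * 0xffff800010000040, the MAX_SIZE column printed for symbol N is 0x40.
+ */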
+SEC("iter/ksym")
+int dump_ksym(struct bpf_iter__ksym *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct kallsym_iter *iter = ctx->ksym;
+ __u32 seq_num = ctx->meta->seq_num;
+ unsigned long value;
+ char type;
+ int ret;
+
+ if (!iter)
+ return 0;
+
+ if (seq_num == 0) {
+ BPF_SEQ_PRINTF(seq, "ADDR TYPE NAME MODULE_NAME KIND MAX_SIZE\n");
+ return 0;
+ }
+ if (last_sym_value)
+ BPF_SEQ_PRINTF(seq, "0x%x\n", iter->value - last_sym_value);
+ else
+ BPF_SEQ_PRINTF(seq, "\n");
+
+ value = iter->show_value ? iter->value : 0;
+
+ last_sym_value = value;
+
+ type = iter->type;
+
+ if (iter->module_name[0]) {
+ type = iter->exported ? toupper(type) : tolower(type);
+ BPF_SEQ_PRINTF(seq, "0x%llx %c %s [ %s ] ",
+ value, type, iter->name, iter->module_name);
+ } else {
+ BPF_SEQ_PRINTF(seq, "0x%llx %c %s ", value, type, iter->name);
+ }
+ if (!iter->pos_arch_end || iter->pos_arch_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "CORE ");
+ else if (!iter->pos_mod_end || iter->pos_mod_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "MOD ");
+ else if (!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "FTRACE_MOD ");
+ else if (!iter->pos_bpf_end || iter->pos_bpf_end > iter->pos)
+ BPF_SEQ_PRINTF(seq, "BPF ");
+ else
+ BPF_SEQ_PRINTF(seq, "KPROBE ");
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_loop.c b/tools/testing/selftests/bpf/progs/bpf_loop.c
index e08565282759..de1fc82d2710 100644
--- a/tools/testing/selftests/bpf/progs/bpf_loop.c
+++ b/tools/testing/selftests/bpf/progs/bpf_loop.c
@@ -11,11 +11,19 @@ struct callback_ctx {
int output;
};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 32);
+ __type(key, int);
+ __type(value, int);
+} map1 SEC(".maps");
+
/* These should be set by the user program */
u32 nested_callback_nr_loops;
u32 stop_index = -1;
u32 nr_loops;
int pid;
+int callback_selector;
/* Making these global variables so that the userspace program
* can verify the output through the skeleton
@@ -111,3 +119,109 @@ int prog_nested_calls(void *ctx)
return 0;
}
+
+static int callback_set_f0(int i, void *ctx)
+{
+ g_output = 0xF0;
+ return 0;
+}
+
+static int callback_set_0f(int i, void *ctx)
+{
+ g_output = 0x0F;
+ return 0;
+}
+
+/*
+ * a non-constant callback pointer is a corner case for the bpf_loop inlining
+ * logic
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int prog_non_constant_callback(void *ctx)
+{
+ struct callback_ctx data = {};
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ int (*callback)(int i, void *ctx);
+
+ g_output = 0;
+
+ if (callback_selector == 0x0F)
+ callback = callback_set_0f;
+ else
+ callback = callback_set_f0;
+
+ bpf_loop(1, callback, NULL, 0);
+
+ return 0;
+}
+
+static int stack_check_inner_callback(void *ctx)
+{
+ return 0;
+}
+
+static int map1_lookup_elem(int key)
+{
+ int *val = bpf_map_lookup_elem(&map1, &key);
+
+ return val ? *val : -1;
+}
+
+static void map1_update_elem(int key, int val)
+{
+ bpf_map_update_elem(&map1, &key, &val, BPF_ANY);
+}
+
+static int stack_check_outer_callback(void *ctx)
+{
+ int a = map1_lookup_elem(1);
+ int b = map1_lookup_elem(2);
+ int c = map1_lookup_elem(3);
+ int d = map1_lookup_elem(4);
+ int e = map1_lookup_elem(5);
+ int f = map1_lookup_elem(6);
+
+ bpf_loop(1, stack_check_inner_callback, NULL, 0);
+
+ map1_update_elem(1, a + 1);
+ map1_update_elem(2, b + 1);
+ map1_update_elem(3, c + 1);
+ map1_update_elem(4, d + 1);
+ map1_update_elem(5, e + 1);
+ map1_update_elem(6, f + 1);
+
+ return 0;
+}
+
+/* Some of the local variables in stack_check() and
+ * stack_check_outer_callback() are allocated on the stack by the
+ * compiler. This test verifies that the stack content for these
+ * variables is preserved between calls to bpf_loop() (it might be
+ * corrupted if loop inlining allocates stack slots incorrectly).
+ */
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int stack_check(void *ctx)
+{
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ int a = map1_lookup_elem(7);
+ int b = map1_lookup_elem(8);
+ int c = map1_lookup_elem(9);
+ int d = map1_lookup_elem(10);
+ int e = map1_lookup_elem(11);
+ int f = map1_lookup_elem(12);
+
+ bpf_loop(1, stack_check_outer_callback, NULL, 0);
+
+ map1_update_elem(7, a + 1);
+ map1_update_elem(8, b + 1);
+ map1_update_elem(9, c + 1);
+ map1_update_elem(10, d + 1);
+ map1_update_elem(11, e + 1);
+ map1_update_elem(12, f + 1);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
index 05838ed9b89c..e1e11897e99b 100644
--- a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
+++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c
@@ -64,9 +64,9 @@ int BPF_KPROBE(handle_sys_prctl)
return 0;
}
-SEC("kprobe/" SYS_PREFIX "sys_prctl")
-int BPF_KPROBE_SYSCALL(prctl_enter, int option, unsigned long arg2,
- unsigned long arg3, unsigned long arg4, unsigned long arg5)
+SEC("ksyscall/prctl")
+int BPF_KSYSCALL(prctl_enter, int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
index 1c1289ba5fc5..98dd2c4815f0 100644
--- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
+++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
@@ -8,6 +8,7 @@
#define SOL_SOCKET 1
#define SO_SNDBUF 7
#define __SO_ACCEPTCON (1 << 16)
+#define SO_PRIORITY 12
#define SOL_TCP 6
#define TCP_CONGESTION 13
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c
new file mode 100644
index 000000000000..888e79db6a77
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c
new file mode 100644
index 000000000000..194749130d87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___diff x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c
new file mode 100644
index 000000000000..3d732d4193e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___err_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c
new file mode 100644
index 000000000000..17cf5d6a848d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enum64val___val3_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c
new file mode 100644
index 000000000000..3824345d82ab
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size___diff_offs x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c
new file mode 100644
index 000000000000..57ae2c258928
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based___diff x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
index 1c7105fcae3c..4ee4748133fe 100644
--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
+++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
@@ -94,7 +94,7 @@ typedef void (* (*signal_t)(int, void (*)(int)))(int);
typedef char * (*fn_ptr_arr1_t[10])(int **);
-typedef char * (* const (* const fn_ptr_arr2_t[5])())(char * (*)(int));
+typedef char * (* (* const fn_ptr_arr2_t[5])())(char * (*)(int));
struct struct_w_typedefs {
int_t a;
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index c95c0cabe951..fd8e1b4c6762 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -13,6 +13,7 @@ struct core_reloc_kernel_output {
int valid[10];
char comm[sizeof("test_progs")];
int comm_len;
+ bool local_task_struct_matches;
};
/*
@@ -785,13 +786,21 @@ struct core_reloc_bitfields___err_too_big_bitfield {
*/
struct core_reloc_size_output {
int int_sz;
+ int int_off;
int struct_sz;
+ int struct_off;
int union_sz;
+ int union_off;
int arr_sz;
+ int arr_off;
int arr_elem_sz;
+ int arr_elem_off;
int ptr_sz;
+ int ptr_off;
int enum_sz;
+ int enum_off;
int float_sz;
+ int float_off;
};
struct core_reloc_size {
@@ -814,6 +823,16 @@ struct core_reloc_size___diff_sz {
double float_field;
};
+struct core_reloc_size___diff_offs {
+ float float_field;
+ enum { YET_OTHER_VALUE = 123 } enum_field;
+ void *ptr_field;
+ int arr_field[4];
+ union { int x; } union_field;
+ struct { int x; } struct_field;
+ int int_field;
+};
+
/* Error case of two candidates with the fields (int_field) at the same
* offset, but with differing final relocation values: size 4 vs size 1
*/
@@ -842,10 +861,11 @@ struct core_reloc_size___err_ambiguous2 {
};
/*
- * TYPE EXISTENCE & SIZE
+ * TYPE EXISTENCE, MATCH & SIZE
*/
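+/* On the BPF side, the *_matches outputs below would be computed with
+ * the bpf_core_type_matches() CO-RE builtin, roughly (sketch):
+ *
+ *	out->struct_matches = bpf_core_type_matches(struct a_struct);
+ */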
struct core_reloc_type_based_output {
bool struct_exists;
+ bool complex_struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
@@ -854,9 +874,24 @@ struct core_reloc_type_based_output {
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
+ bool typedef_restrict_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
+ bool struct_matches;
+ bool complex_struct_matches;
+ bool union_matches;
+ bool enum_matches;
+ bool typedef_named_struct_matches;
+ bool typedef_anon_struct_matches;
+ bool typedef_struct_ptr_matches;
+ bool typedef_int_matches;
+ bool typedef_enum_matches;
+ bool typedef_void_ptr_matches;
+ bool typedef_restrict_ptr_matches;
+ bool typedef_func_proto_matches;
+ bool typedef_arr_matches;
+
int struct_sz;
int union_sz;
int enum_sz;
@@ -874,6 +909,14 @@ struct a_struct {
int x;
};
+struct a_complex_struct {
+ union {
+ struct a_struct * restrict a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
union a_union {
int y;
int z;
@@ -898,6 +941,7 @@ typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
+typedef int *restrict restrict_ptr_typedef;
typedef int (*func_proto_typedef)(long);
@@ -905,22 +949,86 @@ typedef char arr_typedef[20];
struct core_reloc_type_based {
struct a_struct f1;
- union a_union f2;
- enum an_enum f3;
- named_struct_typedef f4;
- anon_struct_typedef f5;
- struct_ptr_typedef f6;
- int_typedef f7;
- enum_typedef f8;
- void_ptr_typedef f9;
- func_proto_typedef f10;
- arr_typedef f11;
+ struct a_complex_struct f2;
+ union a_union f3;
+ enum an_enum f4;
+ named_struct_typedef f5;
+ anon_struct_typedef f6;
+ struct_ptr_typedef f7;
+ int_typedef f8;
+ enum_typedef f9;
+ void_ptr_typedef f10;
+ restrict_ptr_typedef f11;
+ func_proto_typedef f12;
+ arr_typedef f13;
};
/* no types in target */
struct core_reloc_type_based___all_missing {
};
+/* different member orders, enum variant values, signedness, etc */
+struct a_struct___diff {
+ int x;
+ int a;
+};
+
+struct a_struct___forward;
+
+struct a_complex_struct___diff {
+ union {
+ struct a_struct___forward *a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
+union a_union___diff {
+ int z;
+ int y;
+};
+
+typedef struct a_struct___diff named_struct_typedef___diff;
+
+typedef struct { int z, x, y; } anon_struct_typedef___diff;
+
+typedef struct {
+ int c;
+ int b;
+ int a;
+} *struct_ptr_typedef___diff;
+
+enum an_enum___diff {
+ AN_ENUM_VAL2___diff = 0,
+ AN_ENUM_VAL1___diff = 42,
+ AN_ENUM_VAL3___diff = 1,
+};
+
+typedef unsigned int int_typedef___diff;
+
+typedef enum { TYPEDEF_ENUM_VAL2___diff, TYPEDEF_ENUM_VAL1___diff = 50 } enum_typedef___diff;
+
+typedef const void *void_ptr_typedef___diff;
+
+typedef int_typedef___diff (*func_proto_typedef___diff)(long);
+
+typedef char arr_typedef___diff[3];
+
+struct core_reloc_type_based___diff {
+ struct a_struct___diff f1;
+ struct a_complex_struct___diff f2;
+ union a_union___diff f3;
+ enum an_enum___diff f4;
+ named_struct_typedef___diff f5;
+ anon_struct_typedef___diff f6;
+ struct_ptr_typedef___diff f7;
+ int_typedef___diff f8;
+ enum_typedef___diff f9;
+ void_ptr_typedef___diff f10;
+ func_proto_typedef___diff f11;
+ arr_typedef___diff f12;
+};
+
/* different type sizes, extra modifiers, anon vs named enums, etc */
struct a_struct___diff_sz {
long x;
@@ -1099,6 +1207,20 @@ struct core_reloc_enumval_output {
int anon_val2;
};
+struct core_reloc_enum64val_output {
+ bool unsigned_val1_exists;
+ bool unsigned_val2_exists;
+ bool unsigned_val3_exists;
+ bool signed_val1_exists;
+ bool signed_val2_exists;
+ bool signed_val3_exists;
+
+ long unsigned_val1;
+ long unsigned_val2;
+ long signed_val1;
+ long signed_val2;
+};
+
enum named_enum {
NAMED_ENUM_VAL1 = 1,
NAMED_ENUM_VAL2 = 2,
@@ -1116,6 +1238,23 @@ struct core_reloc_enumval {
anon_enum f2;
};
+enum named_unsigned_enum64 {
+ UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL2 = 0x2,
+ UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64 {
+ SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL2 = -2,
+ SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
+};
+
+struct core_reloc_enum64val {
+ enum named_unsigned_enum64 f1;
+ enum named_signed_enum64 f2;
+};
+
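+/* The 0x1ffffffff-style values don't fit in 32 bits, so the compiler
+ * emits BTF_KIND_ENUM64 for these enums, which is what the enum64val
+ * relocation tests exercise.
+ */
+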
/* differing enumerator values */
enum named_enum___diff {
NAMED_ENUM_VAL1___diff = 101,
@@ -1134,6 +1273,23 @@ struct core_reloc_enumval___diff {
anon_enum___diff f2;
};
+enum named_unsigned_enum64___diff {
+ UNSIGNED_ENUM64_VAL1___diff = 0x101ffffffffULL,
+ UNSIGNED_ENUM64_VAL2___diff = 0x202ffffffffULL,
+ UNSIGNED_ENUM64_VAL3___diff = 0x303ffffffffULL,
+};
+
+enum named_signed_enum64___diff {
+ SIGNED_ENUM64_VAL1___diff = -101,
+ SIGNED_ENUM64_VAL2___diff = -202,
+ SIGNED_ENUM64_VAL3___diff = -303,
+};
+
+struct core_reloc_enum64val___diff {
+ enum named_unsigned_enum64___diff f1;
+ enum named_signed_enum64___diff f2;
+};
+
/* missing (optional) third enum value */
enum named_enum___val3_missing {
NAMED_ENUM_VAL1___val3_missing = 111,
@@ -1150,6 +1306,21 @@ struct core_reloc_enumval___val3_missing {
anon_enum___val3_missing f2;
};
+enum named_unsigned_enum64___val3_missing {
+ UNSIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffULL,
+ UNSIGNED_ENUM64_VAL2___val3_missing = 0x222,
+};
+
+enum named_signed_enum64___val3_missing {
+ SIGNED_ENUM64_VAL1___val3_missing = 0x111ffffffffLL,
+ SIGNED_ENUM64_VAL2___val3_missing = -222,
+};
+
+struct core_reloc_enum64val___val3_missing {
+ enum named_unsigned_enum64___val3_missing f1;
+ enum named_signed_enum64___val3_missing f2;
+};
+
/* missing (mandatory) second enum value, should fail */
enum named_enum___err_missing {
NAMED_ENUM_VAL1___err_missing = 1,
@@ -1165,3 +1336,18 @@ struct core_reloc_enumval___err_missing {
enum named_enum___err_missing f1;
anon_enum___err_missing f2;
};
+
+enum named_unsigned_enum64___err_missing {
+ UNSIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL3___err_missing = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64___err_missing {
+ SIGNED_ENUM64_VAL1___err_missing = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL3___err_missing = -3,
+};
+
+struct core_reloc_enum64val___err_missing {
+ enum named_unsigned_enum64___err_missing f1;
+ enum named_signed_enum64___err_missing f2;
+};
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
new file mode 100644
index 000000000000..0a26c243e6e9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct test_info {
+ int x;
+ struct bpf_dynptr ptr;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct bpf_dynptr);
+} array_map1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct test_info);
+} array_map2 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map3 SEC(".maps");
+
+struct sample {
+ int pid;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+int err, val;
+
+static int get_map_val_dynptr(struct bpf_dynptr *ptr)
+{
+ __u32 key = 0, *map_val;
+
+ bpf_map_update_elem(&array_map3, &key, &val, 0);
+
+ map_val = bpf_map_lookup_elem(&array_map3, &key);
+ if (!map_val)
+ return -ENOENT;
+
+ bpf_dynptr_from_mem(map_val, sizeof(*map_val), 0, ptr);
+
+ return 0;
+}
+
+/* Every bpf_ringbuf_reserve_dynptr call must have a corresponding
+ * bpf_ringbuf_submit/discard_dynptr call
+ */
+SEC("?raw_tp/sys_nanosleep")
+int ringbuf_missing_release1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr */
+
+ return 0;
+}
+
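+/* For contrast, a minimal accepted pattern (illustrative sketch, not
+ * one of the fail cases): the reservation is released on every path.
+ */
+SEC("?raw_tp/sys_nanosleep")
+int ringbuf_release_paired(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+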
+SEC("?raw_tp/sys_nanosleep")
+int ringbuf_missing_release2(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr2);
+
+ sample = bpf_dynptr_data(&ptr1, 0, sizeof(*sample));
+ if (!sample) {
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+ return 0;
+ }
+
+ bpf_ringbuf_submit_dynptr(&ptr1, 0);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr on ptr2 */
+
+ return 0;
+}
+
+static int missing_release_callback_fn(__u32 index, void *data)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* missing a call to bpf_ringbuf_discard/submit_dynptr */
+
+ return 0;
+}
+
+/* Any dynptr reserved within a callback must be released with
+ * bpf_ringbuf_submit/discard_dynptr before the callback returns
+ */
+SEC("?raw_tp/sys_nanosleep")
+int ringbuf_missing_release_callback(void *ctx)
+{
+ bpf_loop(10, missing_release_callback_fn, NULL, 0);
+ return 0;
+}
+
+/* Can't call bpf_ringbuf_submit/discard_dynptr on a non-initialized dynptr */
+SEC("?raw_tp/sys_nanosleep")
+int ringbuf_release_uninit_dynptr(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A dynptr can't be used after it has been invalidated */
+SEC("?raw_tp/sys_nanosleep")
+int use_after_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr);
+
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ return 0;
+}
+
+/* Can't call non-dynptr ringbuf APIs on a dynptr ringbuf sample */
+SEC("?raw_tp/sys_nanosleep")
+int ringbuf_invalid_api(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
+ sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 123;
+
+ /* Invalid API use; the dynptr API must be used to submit/discard */
+ bpf_ringbuf_submit(sample, 0);
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Can't add a dynptr to a map */
+SEC("?raw_tp/sys_nanosleep")
+int add_dynptr_to_map1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ int key = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ /* this should fail */
+ bpf_map_update_elem(&array_map1, &key, &ptr, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Can't add a struct with an embedded dynptr to a map */
+SEC("?raw_tp/sys_nanosleep")
+int add_dynptr_to_map2(void *ctx)
+{
+ struct test_info x;
+ int key = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &x.ptr);
+
+ /* this should fail */
+ bpf_map_update_elem(&array_map2, &key, &x, 0);
+
+ bpf_ringbuf_submit_dynptr(&x.ptr, 0);
+
+ return 0;
+}
+
+/* A data slice can't be accessed out of bounds */
+SEC("?raw_tp/sys_nanosleep")
+int data_slice_out_of_bounds_ringbuf(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, 8);
+ if (!data)
+ goto done;
+
+ /* can't index out of bounds of the data slice */
+ val = *((char *)data + 8);
+
+done:
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("?raw_tp/sys_nanosleep")
+int data_slice_out_of_bounds_map_value(void *ctx)
+{
+ __u32 key = 0, map_val;
+ struct bpf_dynptr ptr;
+ void *data;
+
+ get_map_val_dynptr(&ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(map_val));
+ if (!data)
+ return 0;
+
+ /* can't index out of bounds of the data slice */
+ val = *((char *)data + (sizeof(map_val) + 1));
+
+ return 0;
+}
+
+/* A data slice can't be used after it has been released */
+SEC("?raw_tp/sys_nanosleep")
+int data_slice_use_after_release(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct sample *sample;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(*sample), 0, &ptr);
+ sample = bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample)
+ goto done;
+
+ sample->pid = 123;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ val = sample->pid;
+
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* A data slice must first be checked for NULL */
+SEC("?raw_tp/sys_nanosleep")
+int data_slice_missing_null_check1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, 8);
+
+ /* missing if (!data) check */
+
+ /* this should fail */
+ *(__u8 *)data = 3;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+}
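+
+/* For contrast, the accepted pattern (illustrative sketch): check the
+ * slice for NULL before dereferencing it.
+ */
+SEC("?raw_tp/sys_nanosleep")
+int data_slice_null_checked(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ data = bpf_dynptr_data(&ptr, 0, 8);
+ if (data)
+ *(__u8 *)data = 3;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+}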
+
+/* A data slice can't be dereferenced if it wasn't checked for null */
+SEC("?raw_tp/sys_nanosleep")
+int data_slice_missing_null_check2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data1, *data2;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
+
+ data1 = bpf_dynptr_data(&ptr, 0, 8);
+ data2 = bpf_dynptr_data(&ptr, 0, 8);
+ if (data1)
+ /* this should fail */
+ *data2 = 3;
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+/* Can't pass in a dynptr as an arg to a helper function that doesn't take in a
+ * dynptr argument
+ */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_helper1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_strncmp((const char *)&ptr, sizeof(ptr), "hello!");
+
+ return 0;
+}
+
+/* A dynptr can't be passed into a helper function at a non-zero offset */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_helper2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);
+
+ return 0;
+}
+
+/* A bpf_dynptr is invalidated if it's been written into */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_write1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ void *data;
+ __u8 x = 0;
+
+ get_map_val_dynptr(&ptr);
+
+ memcpy(&ptr, &x, sizeof(x));
+
+ /* this should fail */
+ data = bpf_dynptr_data(&ptr, 0, 1);
+
+ return 0;
+}
+
+/*
+ * A bpf_dynptr can't be used as a dynptr if it has been written into at a fixed
+ * offset
+ */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_write2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+ __u8 x = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ memcpy((void *)&ptr + 8, &x, sizeof(x));
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/*
+ * A bpf_dynptr can't be used as a dynptr if it has been written into at a
+ * non-const offset
+ */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_write3(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char stack_buf[16];
+ unsigned long len;
+ __u8 x = 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr);
+
+ memcpy(stack_buf, &val, sizeof(val));
+ len = stack_buf[0] & 0xf;
+
+ memcpy((void *)&ptr + len, &x, sizeof(x));
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+static int invalid_write4_callback(__u32 index, void *data)
+{
+ *(__u32 *)data = 123;
+
+ return 0;
+}
+
+/* If the dynptr is written into in a callback function, it should
+ * be invalidated as a dynptr
+ */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_write4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_loop(10, invalid_write4_callback, &ptr, 0);
+
+ /* this should fail */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A globally-defined bpf_dynptr can't be used (it must reside on the stack) */
+struct bpf_dynptr global_dynptr;
+SEC("?raw_tp/sys_nanosleep")
+int global(void *ctx)
+{
+ /* this should fail */
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &global_dynptr);
+
+ bpf_ringbuf_discard_dynptr(&global_dynptr, 0);
+
+ return 0;
+}
+
+/* A direct read should fail */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_read1(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ /* this should fail */
+ val = *(int *)&ptr;
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* A direct read at an offset should fail */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_read2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ get_map_val_dynptr(&ptr);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);
+
+ return 0;
+}
+
+/* A direct read at an offset into the lower stack slot should fail */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_read3(void *ctx)
+{
+ struct bpf_dynptr ptr1, ptr2;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr1);
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr2);
+
+ /* this should fail */
+ memcpy(&val, (void *)&ptr1 + 8, sizeof(val));
+
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+
+ return 0;
+}
+
+static int invalid_read4_callback(__u32 index, void *data)
+{
+ /* this should fail */
+ val = *(__u32 *)data;
+
+ return 0;
+}
+
+/* A direct read within a callback function should fail */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_read4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
+
+ bpf_loop(10, invalid_read4_callback, &ptr, 0);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Initializing a dynptr at an offset should fail */
+SEC("?raw_tp/sys_nanosleep")
+int invalid_offset(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr + 1);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+/* Can't release a dynptr twice */
+SEC("?raw_tp/sys_nanosleep")
+int release_twice(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &ptr);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ /* this second release should fail */
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ return 0;
+}
+
+static int release_twice_callback_fn(__u32 index, void *data)
+{
+ /* this should fail */
+ bpf_ringbuf_discard_dynptr(data, 0);
+
+ return 0;
+}
+
+/* Test that releasing a dynptr twice, where one of the releases happens
+ * within a callback function, fails
+ */
+SEC("?raw_tp/sys_nanosleep")
+int release_twice_callback(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 32, 0, &ptr);
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+
+ bpf_loop(10, release_twice_callback_fn, &ptr, 0);
+
+ return 0;
+}
+
+/* Reject unsupported local mem types for dynptr_from_mem API */
+SEC("?raw_tp/sys_nanosleep")
+int dynptr_from_mem_invalid_api(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ int x = 0;
+
+ /* this should fail */
+ bpf_dynptr_from_mem(&x, sizeof(x), 0, &ptr);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c
new file mode 100644
index 000000000000..a3a6103c8569
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/dynptr_success.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Facebook */
+
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "errno.h"
+
+char _license[] SEC("license") = "GPL";
+
+int pid, err, val;
+
+struct sample {
+ int pid;
+ int seq;
+ long value;
+ char comm[16];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array_map SEC(".maps");
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_read_write(void *ctx)
+{
+ char write_data[64] = "hello there, world!!";
+ char read_data[64] = {}, buf[64] = {};
+ struct bpf_dynptr ptr;
+ int i;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);
+
+ /* Write data into the dynptr */
+ err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);
+
+ /* Read the data that was written into the dynptr */
+ err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ /* Ensure the data we read matches the data we wrote */
+ for (i = 0; i < sizeof(read_data); i++) {
+ if (read_data[i] != write_data[i]) {
+ err = 1;
+ break;
+ }
+ }
+
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_data_slice(void *ctx)
+{
+ __u32 key = 0, val = 235, *map_val;
+ struct bpf_dynptr ptr;
+ __u32 map_val_size;
+ void *data;
+
+ map_val_size = sizeof(*map_val);
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ bpf_map_update_elem(&array_map, &key, &val, 0);
+
+ map_val = bpf_map_lookup_elem(&array_map, &key);
+ if (!map_val) {
+ err = 1;
+ return 0;
+ }
+
+ bpf_dynptr_from_mem(map_val, map_val_size, 0, &ptr);
+
+ /* Try getting a data slice that is out of range */
+ data = bpf_dynptr_data(&ptr, map_val_size + 1, 1);
+ if (data) {
+ err = 2;
+ return 0;
+ }
+
+ /* Try getting more bytes than available */
+ data = bpf_dynptr_data(&ptr, 0, map_val_size + 1);
+ if (data) {
+ err = 3;
+ return 0;
+ }
+
+ data = bpf_dynptr_data(&ptr, 0, sizeof(__u32));
+ if (!data) {
+ err = 4;
+ return 0;
+ }
+
+ *(__u32 *)data = 999;
+
+ err = bpf_probe_read_kernel(&val, sizeof(val), data);
+ if (err)
+ return 0;
+
+ if (val != *(int *)data)
+ err = 5;
+
+ return 0;
+}
+
+static int ringbuf_callback(__u32 index, void *data)
+{
+ struct sample *sample;
+
+ struct bpf_dynptr *ptr = (struct bpf_dynptr *)data;
+
+ sample = bpf_dynptr_data(ptr, 0, sizeof(*sample));
+ if (!sample)
+ err = 2;
+ else
+ sample->pid += index;
+
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_ringbuf(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct sample *sample;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ val = 100;
+
+ /* Check that a dynamically sized reservation can be made */
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ sample = err ? NULL : bpf_dynptr_data(&ptr, 0, sizeof(*sample));
+ if (!sample) {
+ err = 1;
+ goto done;
+ }
+
+ sample->pid = 10;
+
+ /* Can pass dynptr to callback functions */
+ bpf_loop(10, ringbuf_callback, &ptr, 0);
+
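+ /* 10 + (0 + 1 + ... + 9) == 55 if every callback iteration saw the
+ * sample
+ */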
+ if (sample->pid != 55)
+ err = 2;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/exhandler_kern.c b/tools/testing/selftests/bpf/progs/exhandler_kern.c
index f5ca142abf8f..20d009e2d266 100644
--- a/tools/testing/selftests/bpf/progs/exhandler_kern.c
+++ b/tools/testing/selftests/bpf/progs/exhandler_kern.c
@@ -37,7 +37,16 @@ int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
*/
work = task->task_works;
func = work->func;
- if (!work && !func)
- exception_triggered++;
+ /* Currently the verifier will fail on a `btf_ptr |= btf_ptr`
+ * instruction. To work around the issue, use barrier_var() and
+ * rewrite as below to prevent the compiler from generating
+ * verifier-unfriendly code.
+ */
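+ /* barrier_var() (from bpf_helpers.h) is a single-variable compiler
+ * barrier, roughly asm volatile("" : "=r"(var) : "0"(var)); it
+ * forces the value through a register so the ORs aren't combined.
+ */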
+ barrier_var(work);
+ if (work)
+ return 0;
+ barrier_var(func);
+ if (func)
+ return 0;
+ exception_triggered++;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
new file mode 100644
index 000000000000..8e545865ea33
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array_map SEC(".maps");
+
+static __u64
+check_array_elem(struct bpf_map *map, __u32 *key, __u64 *val,
+ void *data)
+{
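+ /* Writing into the key pointer must be rejected by the verifier;
+ * this prog exists only to exercise that load failure.
+ */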
+ bpf_get_current_comm(key, sizeof(*key));
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int test_map_key_write(const void *ctx)
+{
+ bpf_for_each_map_elem(&array_map, check_array_elem, NULL, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_global_func.c b/tools/testing/selftests/bpf/progs/freplace_global_func.c
new file mode 100644
index 000000000000..96cb61a6ce87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_global_func.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline
+int test_ctx_global_func(struct __sk_buff *skb)
+{
+ volatile int retval = 1;
+ return retval;
+}
+
+SEC("freplace/test_pkt_access")
+int new_test_pkt_access(struct __sk_buff *skb)
+{
+ return test_ctx_global_func(skb);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c
index 600be50800f8..08f95a8155d1 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -54,21 +54,21 @@ static void kprobe_multi_check(void *ctx, bool is_return)
if (is_return) {
SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
- SET(kretprobe_test2_result, &bpf_fentry_test2, 7);
- SET(kretprobe_test3_result, &bpf_fentry_test3, 6);
- SET(kretprobe_test4_result, &bpf_fentry_test4, 5);
- SET(kretprobe_test5_result, &bpf_fentry_test5, 4);
- SET(kretprobe_test6_result, &bpf_fentry_test6, 3);
- SET(kretprobe_test7_result, &bpf_fentry_test7, 2);
+ SET(kretprobe_test2_result, &bpf_fentry_test2, 2);
+ SET(kretprobe_test3_result, &bpf_fentry_test3, 7);
+ SET(kretprobe_test4_result, &bpf_fentry_test4, 6);
+ SET(kretprobe_test5_result, &bpf_fentry_test5, 5);
+ SET(kretprobe_test6_result, &bpf_fentry_test6, 4);
+ SET(kretprobe_test7_result, &bpf_fentry_test7, 3);
SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
} else {
SET(kprobe_test1_result, &bpf_fentry_test1, 1);
- SET(kprobe_test2_result, &bpf_fentry_test2, 2);
- SET(kprobe_test3_result, &bpf_fentry_test3, 3);
- SET(kprobe_test4_result, &bpf_fentry_test4, 4);
- SET(kprobe_test5_result, &bpf_fentry_test5, 5);
- SET(kprobe_test6_result, &bpf_fentry_test6, 6);
- SET(kprobe_test7_result, &bpf_fentry_test7, 7);
+ SET(kprobe_test2_result, &bpf_fentry_test2, 7);
+ SET(kprobe_test3_result, &bpf_fentry_test3, 2);
+ SET(kprobe_test4_result, &bpf_fentry_test4, 3);
+ SET(kprobe_test5_result, &bpf_fentry_test5, 4);
+ SET(kprobe_test6_result, &bpf_fentry_test6, 5);
+ SET(kprobe_test7_result, &bpf_fentry_test7, 6);
SET(kprobe_test8_result, &bpf_fentry_test8, 8);
}
@@ -98,3 +98,17 @@ int test_kretprobe(struct pt_regs *ctx)
kprobe_multi_check(ctx, true);
return 0;
}
+
+SEC("kprobe.multi")
+int test_kprobe_manual(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, false);
+ return 0;
+}
+
+SEC("kretprobe.multi")
+int test_kretprobe_manual(struct pt_regs *ctx)
+{
+ kprobe_multi_check(ctx, true);
+ return 0;
+}
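+
+/* The ".multi" programs above are attached manually from user space by
+ * symbol pattern, e.g. (libbpf sketch):
+ *
+ * LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ * link = bpf_program__attach_kprobe_multi_opts(prog, "bpf_fentry_test*",
+ * &opts);
+ */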
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c b/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c
new file mode 100644
index 000000000000..e76e499aca39
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi_empty.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("kprobe.multi/")
+int test_kprobe_empty(struct pt_regs *ctx)
+{
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
index b964ec1390c2..b05571bc67d5 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs1.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c
@@ -4,6 +4,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
/* weak and shared between two files */
const volatile int my_tid __weak;
@@ -44,6 +45,13 @@ void set_output_ctx1(__u64 *ctx)
/* this weak instance should win because it's the first one */
__weak int set_output_weak(int x)
{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function; this used
+ * to cause problems for the BPF static linker
+ */
+ whatever = bpf_core_type_size(struct task_struct);
+
output_weak1 = x;
return x;
}
@@ -53,12 +61,17 @@ extern int set_output_val2(int x);
/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
__hidden extern void set_output_ctx2(__u64 *ctx);
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int BPF_PROG(handler1, struct pt_regs *regs, long id)
{
+ static volatile int whatever;
+
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+
set_output_val2(1000);
set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
index 575e958e60b7..ee7e3848ee4f 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs2.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c
@@ -4,6 +4,7 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
/* weak and shared between both files */
const volatile int my_tid __weak;
@@ -44,6 +45,13 @@ void set_output_ctx2(__u64 *ctx)
/* this weak instance should lose, because it will be processed second */
__weak int set_output_weak(int x)
{
+ static volatile int whatever;
+
+ /* make sure we use CO-RE relocations in a weak function; this used
+ * to cause problems for the BPF static linker
+ */
+ whatever = 2 * bpf_core_type_size(struct task_struct);
+
output_weak2 = x;
return 2 * x;
}
@@ -53,12 +61,17 @@ extern int set_output_val1(int x);
/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
__hidden extern void set_output_ctx1(__u64 *ctx);
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int BPF_PROG(handler2, struct pt_regs *regs, long id)
{
+ static volatile int whatever;
+
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+
set_output_val1(2000);
set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
diff --git a/tools/testing/selftests/bpf/progs/local_storage_bench.c b/tools/testing/selftests/bpf/progs/local_storage_bench.c
new file mode 100644
index 000000000000..2c3234c5b73a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_bench.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define HASHMAP_SZ 4194304
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+ });
+} array_of_local_storage_maps SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1000);
+ __type(key, int);
+ __type(value, int);
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, HASHMAP_SZ);
+ __type(key, int);
+ __type(value, int);
+ });
+} array_of_hash_maps SEC(".maps");
+
+long important_hits;
+long hits;
+
+/* set from user-space */
+const volatile unsigned int use_hashmap;
+const volatile unsigned int hashmap_num_keys;
+const volatile unsigned int num_maps;
+const volatile unsigned int interleave;
+
+struct loop_ctx {
+ struct task_struct *task;
+ long loop_hits;
+ long loop_important_hits;
+};
+
+static int do_lookup(unsigned int elem, struct loop_ctx *lctx)
+{
+ void *map, *inner_map;
+ int idx = 0;
+
+ if (use_hashmap)
+ map = &array_of_hash_maps;
+ else
+ map = &array_of_local_storage_maps;
+
+ inner_map = bpf_map_lookup_elem(map, &elem);
+ if (!inner_map)
+ return -1;
+
+ if (use_hashmap) {
+ idx = bpf_get_prandom_u32() % hashmap_num_keys;
+ bpf_map_lookup_elem(inner_map, &idx);
+ } else {
+ bpf_task_storage_get(inner_map, lctx->task, &idx,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ }
+
+ lctx->loop_hits++;
+ if (!elem)
+ lctx->loop_important_hits++;
+ return 0;
+}
+
+static long loop(u32 index, void *ctx)
+{
+ struct loop_ctx *lctx = (struct loop_ctx *)ctx;
+ unsigned int map_idx = index % num_maps;
+
+ do_lookup(map_idx, lctx);
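+ /* optionally revisit elem 0 every third index so lookups of the
+ * "important" element are interleaved with the rest
+ */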
+ if (interleave && map_idx % 3 == 0)
+ do_lookup(0, lctx);
+ return 0;
+}
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int get_local(void *ctx)
+{
+ struct loop_ctx lctx;
+
+ lctx.task = bpf_get_current_task_btf();
+ lctx.loop_hits = 0;
+ lctx.loop_important_hits = 0;
+ bpf_loop(10000, &loop, &lctx, 0);
+ __sync_add_and_fetch(&hits, lctx.loop_hits);
+ __sync_add_and_fetch(&important_hits, lctx.loop_important_hits);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c b/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
new file mode 100644
index 000000000000..03bf69f49075
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} task_storage SEC(".maps");
+
+long hits;
+long gp_hits;
+long gp_times;
+long current_gp_start;
+long unexpected;
+bool postgp_seen;
+
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int get_local(void *ctx)
+{
+ struct task_struct *task;
+ int idx;
+ int *s;
+
+ idx = 0;
+ task = bpf_get_current_task_btf();
+ s = bpf_task_storage_get(&task_storage, task, &idx,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!s)
+ return 0;
+
+ *s = 3;
+ bpf_task_storage_delete(&task_storage, task);
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
+SEC("fentry/rcu_tasks_trace_pregp_step")
+int pregp_step(struct pt_regs *ctx)
+{
+ current_gp_start = bpf_ktime_get_ns();
+ return 0;
+}
+
+SEC("fentry/rcu_tasks_trace_postgp")
+int postgp(struct pt_regs *ctx)
+{
+ if (!current_gp_start && postgp_seen) {
+ /* Will only happen if prog tracing rcu_tasks_trace_pregp_step doesn't
+ * execute before this prog
+ */
+ __sync_add_and_fetch(&unexpected, 1);
+ return 0;
+ }
+
+ __sync_add_and_fetch(&gp_times, bpf_ktime_get_ns() - current_gp_start);
+ __sync_add_and_fetch(&gp_hits, 1);
+ current_gp_start = 0;
+ postgp_seen = true;
+ return 0;
+}
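+
+/* gp_times accumulates the duration of each tasks-trace RCU grace
+ * period (pregp_step -> postgp), so user space can derive the average
+ * grace-period latency from gp_times / gp_hits.
+ */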
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/loop5.c b/tools/testing/selftests/bpf/progs/loop5.c
index 913791923fa3..1b13f37f85ec 100644
--- a/tools/testing/selftests/bpf/progs/loop5.c
+++ b/tools/testing/selftests/bpf/progs/loop5.c
@@ -2,7 +2,6 @@
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
-#define barrier() __asm__ __volatile__("": : :"memory")
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
new file mode 100644
index 000000000000..4f2d60b87b75
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#ifndef AF_PACKET
+#define AF_PACKET 17
+#endif
+
+#ifndef AF_UNIX
+#define AF_UNIX 1
+#endif
+
+#ifndef EPERM
+#define EPERM 1
+#endif
+
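+/* Convention for the lsm_cgroup programs below: returning 1 allows the
+ * operation, returning 0 rejects it (surfaced to the caller as -EPERM).
+ */
+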
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+ __type(key, __u64);
+ __type(value, __u64);
+} cgroup_storage SEC(".maps");
+
+int called_socket_post_create;
+int called_socket_post_create2;
+int called_socket_bind;
+int called_socket_bind2;
+int called_socket_alloc;
+int called_socket_clone;
+
+static __always_inline int test_local_storage(void)
+{
+ __u64 *val;
+
+ val = bpf_get_local_storage(&cgroup_storage, 0);
+ if (!val)
+ return 0;
+ *val += 1;
+
+ return 1;
+}
+
+static __always_inline int real_create(struct socket *sock, int family,
+ int protocol)
+{
+ struct sock *sk;
+ int prio = 123;
+
+ /* Reject non-tx-only AF_PACKET. */
+ if (family == AF_PACKET && protocol != 0)
+ return 0; /* EPERM */
+
+ sk = sock->sk;
+ if (!sk)
+ return 1;
+
+ /* The rest of the sockets get default policy. */
+ if (bpf_setsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0; /* EPERM */
+
+ /* Make sure bpf_getsockopt is allowed and works. */
+ prio = 0;
+ if (bpf_getsockopt(sk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 0; /* EPERM */
+ if (prio != 123)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ called_socket_post_create++;
+ return real_create(sock, family, protocol);
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_post_create")
+int BPF_PROG(socket_post_create2, struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ called_socket_post_create2++;
+ return real_create(sock, family, protocol);
+}
+
+static __always_inline int real_bind(struct socket *sock,
+ struct sockaddr *address,
+ int addrlen)
+{
+ struct sockaddr_ll sa = {};
+
+ if (sock->sk->__sk_common.skc_family != AF_PACKET)
+ return 1;
+
+ if (sock->sk->sk_kern_sock)
+ return 1;
+
+ bpf_probe_read_kernel(&sa, sizeof(sa), address);
+ if (sa.sll_protocol)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_bind")
+int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ called_socket_bind++;
+ return real_bind(sock, address, addrlen);
+}
+
+/* __cgroup_bpf_run_lsm_socket */
+SEC("lsm_cgroup/socket_bind")
+int BPF_PROG(socket_bind2, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ called_socket_bind2++;
+ return real_bind(sock, address, addrlen);
+}
+
+/* __cgroup_bpf_run_lsm_current (via bpf_lsm_current_hooks) */
+SEC("lsm_cgroup/sk_alloc_security")
+int BPF_PROG(socket_alloc, struct sock *sk, int family, gfp_t priority)
+{
+ called_socket_alloc++;
+ if (family == AF_UNIX)
+ return 0; /* EPERM */
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 0; /* EPERM */
+
+ return 1;
+}
+
+/* __cgroup_bpf_run_lsm_sock */
+SEC("lsm_cgroup/inet_csk_clone")
+int BPF_PROG(socket_clone, struct sock *newsk, const struct request_sock *req)
+{
+ int prio = 234;
+
+ if (!newsk)
+ return 1;
+
+ /* Accepted request sockets get a different priority. */
+ if (bpf_setsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 1;
+
+ /* Make sure bpf_getsockopt is allowed and works. */
+ prio = 0;
+ if (bpf_getsockopt(newsk, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)))
+ return 1;
+ if (prio != 234)
+ return 1;
+
+ /* Can access cgroup local storage. */
+ if (!test_local_storage())
+ return 1;
+
+ called_socket_clone++;
+
+ return 1;
+}
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c b/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c
new file mode 100644
index 000000000000..6cb0f161f417
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("lsm_cgroup/inet_csk_clone")
+int BPF_PROG(nonvoid_socket_clone, struct sock *newsk, const struct request_sock *req)
+{
+ /* Cannot return errors from void LSM hooks. */
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
new file mode 100644
index 000000000000..eb8217803493
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_value {
+ struct prog_test_ref_kfunc __kptr *unref_ptr;
+ struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
+};
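+
+/* An __kptr field may hold an unreferenced (untrusted) kernel pointer
+ * and can be stored to directly; an __kptr_ref field holds a referenced
+ * pointer and must be exchanged with bpf_kptr_xchg().
+ */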
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct hash_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} hash_map SEC(".maps");
+
+struct hash_malloc_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_malloc_map SEC(".maps");
+
+struct lru_hash_map {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} lru_hash_map SEC(".maps");
+
+#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \
+ struct { \
+ __uint(type, map_type); \
+ __uint(max_entries, 1); \
+ __uint(key_size, sizeof(int)); \
+ __uint(value_size, sizeof(int)); \
+ __array(values, struct inner_map_type); \
+ } name SEC(".maps") = { \
+ .values = { [0] = &inner_map_type }, \
+ }
+
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
+
+extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern struct prog_test_ref_kfunc *
+bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
+extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+
+static void test_kptr_unref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->unref_ptr;
+ /* store untrusted_ptr_or_null_ */
+ v->unref_ptr = p;
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store untrusted_ptr_ */
+ v->unref_ptr = p;
+ /* store NULL */
+ v->unref_ptr = NULL;
+}
+
+static void test_kptr_ref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->ref_ptr;
+ /* store ptr_or_null_ */
+ v->unref_ptr = p;
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store NULL */
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ /* store ptr_ */
+ v->unref_ptr = p;
+ bpf_kfunc_call_test_release(p);
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return;
+ /* store ptr_ */
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr_get(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr(struct map_value *v)
+{
+ test_kptr_unref(v);
+ test_kptr_ref(v);
+ test_kptr_get(v);
+}
+
+SEC("tc")
+int test_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+
+#undef TEST
+ return 0;
+}
+
+SEC("tc")
+int test_map_in_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+ void *map;
+
+#define TEST(map_in_map) \
+ map = bpf_map_lookup_elem(&map_in_map, &key); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_of_array_maps);
+ TEST(array_of_hash_maps);
+ TEST(array_of_hash_malloc_maps);
+ TEST(array_of_lru_hash_maps);
+ TEST(hash_of_array_maps);
+ TEST(hash_of_hash_maps);
+ TEST(hash_of_hash_malloc_maps);
+ TEST(hash_of_lru_hash_maps);
+
+#undef TEST
+ return 0;
+}
+
+SEC("tc")
+int test_map_kptr_ref(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p, *p_st;
+ unsigned long arg = 0;
+ struct map_value *v;
+ int key = 0, ret;
+
+ p = bpf_kfunc_call_test_acquire(&arg);
+ if (!p)
+ return 1;
+
+ p_st = p->next;
+ if (p_st->cnt.refs.counter != 2) {
+ ret = 2;
+ goto end;
+ }
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v) {
+ ret = 3;
+ goto end;
+ }
+
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ ret = 4;
+ goto end;
+ }
+ if (p_st->cnt.refs.counter != 2)
+ return 5;
+
+ p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
+ if (!p)
+ return 6;
+ if (p_st->cnt.refs.counter != 3) {
+ ret = 7;
+ goto end;
+ }
+ bpf_kfunc_call_test_release(p);
+ if (p_st->cnt.refs.counter != 2)
+ return 8;
+
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return 9;
+ bpf_kfunc_call_test_release(p);
+ if (p_st->cnt.refs.counter != 1)
+ return 10;
+
+ p = bpf_kfunc_call_test_acquire(&arg);
+ if (!p)
+ return 11;
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ ret = 12;
+ goto end;
+ }
+ if (p_st->cnt.refs.counter != 2)
+ return 13;
+ /* Leave in map */
+
+ return 0;
+end:
+ bpf_kfunc_call_test_release(p);
+ return ret;
+}
+
+SEC("tc")
+int test_map_kptr_ref2(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p, *p_st;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 1;
+
+ p_st = v->ref_ptr;
+ if (!p_st || p_st->cnt.refs.counter != 2)
+ return 2;
+
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return 3;
+ if (p_st->cnt.refs.counter != 2) {
+ bpf_kfunc_call_test_release(p);
+ return 4;
+ }
+
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (p) {
+ bpf_kfunc_call_test_release(p);
+ return 5;
+ }
+ if (p_st->cnt.refs.counter != 2)
+ return 6;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
new file mode 100644
index 000000000000..05e209b1b12a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct map_value {
+ char buf[8];
+ struct prog_test_ref_kfunc __kptr *unref_ptr;
+ struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
+ struct prog_test_member __kptr_ref *ref_memb_ptr;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern struct prog_test_ref_kfunc *
+bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
+
+SEC("?tc")
+int size_not_bpf_dw(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ *(u32 *)&v->unref_ptr = 0;
+ return 0;
+}
+
+SEC("?tc")
+int non_const_var_off(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ *(u64 *)((void *)v + id) = 0;
+
+ return 0;
+}
+
+SEC("?tc")
+int non_const_var_off_kptr_xchg(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ bpf_kptr_xchg((void *)v + id, NULL);
+
+ return 0;
+}
+
+SEC("?tc")
+int misaligned_access_write(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ *(void **)((void *)v + 7) = NULL;
+
+ return 0;
+}
+
+SEC("?tc")
+int misaligned_access_read(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ return *(u64 *)((void *)v + 1);
+}
+
+SEC("?tc")
+int reject_var_off_store(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0, id;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ id = ctx->protocol;
+ if (id < 4 || id > 12)
+ return 0;
+ unref_ptr += id;
+ v->unref_ptr = unref_ptr;
+
+ return 0;
+}
+
+SEC("?tc")
+int reject_bad_type_match(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ unref_ptr = (void *)unref_ptr + 4;
+ v->unref_ptr = unref_ptr;
+
+ return 0;
+}
+
+SEC("?tc")
+int marked_as_untrusted_or_null(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_this_cpu_ptr(v->unref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+int correct_btf_id_check_size(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->unref_ptr;
+ if (!p)
+ return 0;
+ return *(int *)((void *)p + bpf_core_type_size(struct prog_test_ref_kfunc));
+}
+
+SEC("?tc")
+int inherit_untrusted_on_walk(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *unref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ unref_ptr = v->unref_ptr;
+ if (!unref_ptr)
+ return 0;
+ unref_ptr = unref_ptr->next;
+ bpf_this_cpu_ptr(unref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+int reject_kptr_xchg_on_unref(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_kptr_xchg(&v->unref_ptr, NULL);
+ return 0;
+}
+
+SEC("?tc")
+int reject_kptr_get_no_map_val(struct __sk_buff *ctx)
+{
+ bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0);
+ return 0;
+}
+
+SEC("?tc")
+int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx)
+{
+ bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0);
+ return 0;
+}
+
+SEC("?tc")
+int reject_kptr_get_no_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_kfunc_call_test_kptr_get((void *)v, 0, 0);
+ return 0;
+}
+
+SEC("?tc")
+int reject_kptr_get_on_unref(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0);
+ return 0;
+}
+
+SEC("?tc")
+int reject_kptr_get_bad_type_match(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0);
+ return 0;
+}
+
+SEC("?tc")
+int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_this_cpu_ptr(v->ref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+int reject_untrusted_store_to_ref(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->ref_ptr;
+ if (!p)
+ return 0;
+ /* Checkmate, clang */
+ *(struct prog_test_ref_kfunc * volatile *)&v->ref_ptr = p;
+ return 0;
+}
+
+SEC("?tc")
+int reject_untrusted_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = v->ref_ptr;
+ if (!p)
+ return 0;
+ bpf_kptr_xchg(&v->ref_ptr, p);
+ return 0;
+}
+
+SEC("?tc")
+int reject_bad_type_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *ref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!ref_ptr)
+ return 0;
+ bpf_kptr_xchg(&v->ref_memb_ptr, ref_ptr);
+ return 0;
+}
+
+SEC("?tc")
+int reject_member_of_ref_xchg(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *ref_ptr;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ ref_ptr = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!ref_ptr)
+ return 0;
+ bpf_kptr_xchg(&v->ref_memb_ptr, &ref_ptr->memb);
+ return 0;
+}
+
+SEC("?syscall")
+int reject_indirect_helper_access(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_get_current_comm(v, sizeof(v->buf) + 1);
+ return 0;
+}
+
+__noinline
+int write_func(int *p)
+{
+ return p ? (*p = 42) : 0;
+}
+
+SEC("?tc")
+int reject_indirect_global_func_access(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ return write_func((void *)v + 5);
+}
+
+SEC("?tc")
+int kptr_xchg_ref_state(struct __sk_buff *ctx)
+{
+ struct prog_test_ref_kfunc *p;
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return 0;
+ bpf_kptr_xchg(&v->ref_ptr, p);
+ return 0;
+}
+
+SEC("?tc")
+int kptr_get_ref_state(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int key = 0;
+
+ v = bpf_map_lookup_elem(&array_map, &key);
+ if (!v)
+ return 0;
+
+ bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
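
The SEC("?tc") programs above are negative verifier tests: the "?" prefix marks them as non-autoloading, so a harness can enable one program at a time and assert that loading fails. A minimal sketch of that pattern, assuming a plain libbpf bpf_object handle (the actual selftest runner differs):

#include <bpf/libbpf.h>

/* Load a single "?"-flagged program and expect the verifier to reject it. */
static int expect_load_failure(struct bpf_object *obj, const char *prog_name)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj)
		bpf_program__set_autoload(prog, false);

	prog = bpf_object__find_program_by_name(obj, prog_name);
	if (!prog)
		return -1;
	bpf_program__set_autoload(prog, true);

	/* the load must fail for the test to pass */
	return bpf_object__load(obj) ? 0 : -1;
}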
diff --git a/tools/testing/selftests/bpf/progs/mptcp_sock.c b/tools/testing/selftests/bpf/progs/mptcp_sock.c
new file mode 100644
index 000000000000..91a0d7eff2ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_sock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Tessares SA. */
+/* Copyright (c) 2022, SUSE. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+__u32 token = 0;
+
+struct mptcp_storage {
+ __u32 invoked;
+ __u32 is_mptcp;
+ struct sock *sk;
+ __u32 token;
+ struct sock *first;
+ char ca_name[TCP_CA_NAME_MAX];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct mptcp_storage);
+} socket_storage_map SEC(".maps");
+
+SEC("sockops")
+int _sockops(struct bpf_sock_ops *ctx)
+{
+ struct mptcp_storage *storage;
+ struct mptcp_sock *msk;
+ int op = (int)ctx->op;
+ struct tcp_sock *tsk;
+ struct bpf_sock *sk;
+ bool is_mptcp;
+
+ if (op != BPF_SOCK_OPS_TCP_CONNECT_CB)
+ return 1;
+
+ sk = ctx->sk;
+ if (!sk)
+ return 1;
+
+ tsk = bpf_skc_to_tcp_sock(sk);
+ if (!tsk)
+ return 1;
+
+ is_mptcp = bpf_core_field_exists(tsk->is_mptcp) ? tsk->is_mptcp : 0;
+ if (!is_mptcp) {
+ storage = bpf_sk_storage_get(&socket_storage_map, sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 1;
+
+ storage->token = 0;
+ __builtin_memset(storage->ca_name, 0, TCP_CA_NAME_MAX);
+ storage->first = NULL;
+ } else {
+ msk = bpf_skc_to_mptcp_sock(sk);
+ if (!msk)
+ return 1;
+
+ storage = bpf_sk_storage_get(&socket_storage_map, msk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 1;
+
+ storage->token = msk->token;
+ __builtin_memcpy(storage->ca_name, msk->ca_name, TCP_CA_NAME_MAX);
+ storage->first = msk->first;
+ }
+ storage->invoked++;
+ storage->is_mptcp = is_mptcp;
+ storage->sk = (struct sock *)sk;
+
+ return 1;
+}
+
+SEC("fentry/mptcp_pm_new_connection")
+int BPF_PROG(trace_mptcp_pm_new_connection, struct mptcp_sock *msk,
+ const struct sock *ssk, int server_side)
+{
+ if (!server_side)
+ token = msk->token;
+
+ return 0;
+}
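
Userspace can read back the per-socket state that _sockops records by looking up the sk_storage map with the socket's fd as the key. A sketch, assuming a userspace mirror of struct mptcp_storage and a connected socket fd:

#include <bpf/bpf.h>

/* sk_storage maps are keyed by a socket fd when accessed from userspace */
static int read_mptcp_storage(int map_fd, int sock_fd, struct mptcp_storage *val)
{
	return bpf_map_lookup_elem(map_fd, &sock_fd, val);
}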
diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
index b3fcb5274ee0..f793280a3238 100644
--- a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
+++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
@@ -35,10 +35,10 @@ int oncpu(void *ctx)
long val;
val = bpf_get_stackid(ctx, &stackmap, 0);
- if (val > 0)
+ if (val >= 0)
stackid_kernel = 2;
val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
- if (val > 0)
+ if (val >= 0)
stackid_user = 2;
trace = bpf_map_lookup_elem(&stackdata_map, &key);
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
index 4896fdf816f7..92331053dba3 100644
--- a/tools/testing/selftests/bpf/progs/profiler.inc.h
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -826,8 +826,9 @@ out:
SEC("kprobe/vfs_link")
int BPF_KPROBE(kprobe__vfs_link,
- struct dentry* old_dentry, struct inode* dir,
- struct dentry* new_dentry, struct inode** delegated_inode)
+ struct dentry* old_dentry, struct user_namespace *mnt_userns,
+ struct inode* dir, struct dentry* new_dentry,
+ struct inode** delegated_inode)
{
struct bpf_func_stats_ctx stats_ctx;
bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_link);
diff --git a/tools/testing/selftests/bpf/progs/profiler1.c b/tools/testing/selftests/bpf/progs/profiler1.c
index 4df9088bfc00..fb6b13522949 100644
--- a/tools/testing/selftests/bpf/progs/profiler1.c
+++ b/tools/testing/selftests/bpf/progs/profiler1.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
-#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
#define UNROLL
#define INLINE __always_inline
#include "profiler.inc.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 1ed28882daf3..6c7b1fb268d6 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -171,8 +171,6 @@ struct process_frame_ctx {
bool done;
};
-#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
-
static int process_frame_callback(__u32 i, struct process_frame_ctx *ctx)
{
int zero = 0;
@@ -299,7 +297,11 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
#ifdef NO_UNROLL
#pragma clang loop unroll(disable)
#else
+#ifdef UNROLL_COUNT
+#pragma clang loop unroll_count(UNROLL_COUNT)
+#else
#pragma clang loop unroll(full)
+#endif
#endif /* NO_UNROLL */
/* Unwind python stack */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
diff --git a/tools/testing/selftests/bpf/progs/pyperf600.c b/tools/testing/selftests/bpf/progs/pyperf600.c
index cb49b89e37cd..ce1aa5189cc4 100644
--- a/tools/testing/selftests/bpf/progs/pyperf600.c
+++ b/tools/testing/selftests/bpf/progs/pyperf600.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
-/* clang will not unroll the loop 600 times.
- * Instead it will unroll it to the amount it deemed
- * appropriate, but the loop will still execute 600 times.
- * Total program size is around 90k insns
+/* Fully unrolling 600 iterations would grow the total
+ * program size to close to 298k insns, which may push a
+ * BPF_JMP insn offset out of 16-bit integer range.
+ * So limit the unroll count to 150: the total program
+ * size stays around 80k insns, but the loop still
+ * executes 600 times.
*/
+#define UNROLL_COUNT 150
#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/skb_load_bytes.c b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
new file mode 100644
index 000000000000..e4252fd973be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 load_offset = 0;
+int test_result = 0;
+
+SEC("tc")
+int skb_process(struct __sk_buff *skb)
+{
+ char buf[16];
+
+ test_result = bpf_skb_load_bytes(skb, load_offset, buf, 10);
+
+ return 0;
+}
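
A harness drives skb_process with BPF_PROG_TEST_RUN, varying load_offset and checking test_result afterwards. A sketch, assuming a bpftool-generated skeleton named skb_load_bytes:

static void run_one(struct skb_load_bytes *skel, __u32 offset)
{
	char pkt[64] = {};	/* dummy packet payload */
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.repeat = 1,
	);

	skel->bss->load_offset = offset;
	bpf_prog_test_run_opts(bpf_program__fd(skel->progs.skb_process), &topts);
	/* skel->bss->test_result is 0 for in-range offsets, negative otherwise */
}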
diff --git a/tools/testing/selftests/bpf/progs/strncmp_test.c b/tools/testing/selftests/bpf/progs/strncmp_test.c
index 900d930d48a8..769668feed48 100644
--- a/tools/testing/selftests/bpf/progs/strncmp_test.c
+++ b/tools/testing/selftests/bpf/progs/strncmp_test.c
@@ -19,7 +19,7 @@ unsigned int no_const_str_size = STRNCMP_STR_SZ;
char _license[] SEC("license") = "GPL";
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int do_strncmp(void *ctx)
{
if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
@@ -29,7 +29,7 @@ int do_strncmp(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_const_str_size(void *ctx)
{
/* The value of string size is not const, so will fail */
@@ -37,7 +37,7 @@ int strncmp_bad_not_const_str_size(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_writable_target(void *ctx)
{
/* Compared target is not read-only, so will fail */
@@ -45,7 +45,7 @@ int strncmp_bad_writable_target(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_null_term_target(void *ctx)
{
/* Compared target is not null-terminated, so will fail */
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
new file mode 100644
index 000000000000..41ce83da78e8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define __unused __attribute__((unused))
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+int done = 0;
+
+SEC("tc")
+int classifier_0(struct __sk_buff *skb __unused)
+{
+ done = 1;
+ return 0;
+}
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ /* Don't propagate the constant to the caller */
+ volatile int ret = 1;
+
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return ret;
+}
+
+SEC("tc")
+int entry(struct __sk_buff *skb)
+{
+ /* Have data on the stack whose size is not a multiple of 8 */
+ volatile char arr[1] = {};
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
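
Before running entry, userspace has to seed jmp_table[0] with the fd of classifier_0; otherwise bpf_tail_call_static() falls through and subprog_tail() returns 1. A sketch, assuming a skeleton named tailcall_bpf2bpf6:

#include <bpf/bpf.h>

static int setup_jmp_table(struct tailcall_bpf2bpf6 *skel)
{
	int key = 0;
	int prog_fd = bpf_program__fd(skel->progs.classifier_0);

	return bpf_map_update_elem(bpf_map__fd(skel->maps.jmp_table),
				   &key, &prog_fd, BPF_ANY);
}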
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c b/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c
new file mode 100644
index 000000000000..7bb872fb22dd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+ return (struct tcp_sock *)sk;
+}
+
+SEC("struct_ops/incompl_cong_ops_ssthresh")
+__u32 BPF_PROG(incompl_cong_ops_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops/incompl_cong_ops_undo_cwnd")
+__u32 BPF_PROG(incompl_cong_ops_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops incompl_cong_ops = {
+ /* Intentionally leave out cong_avoid() and cong_control(); the kernel
+ * requires at least one of them for registration to succeed.
+ */
+ .ssthresh = (void *)incompl_cong_ops_ssthresh,
+ .undo_cwnd = (void *)incompl_cong_ops_undo_cwnd,
+ .name = "bpf_incompl_ops",
+};
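
Registering this struct_ops is therefore expected to fail. A sketch of the expected-failure check, with the skeleton name assumed:

static bool incompl_ops_rejected(struct tcp_ca_incompl_cong_ops *skel)
{
	struct bpf_link *link;

	link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);
	if (link) {
		bpf_link__destroy(link);
		return false;	/* unexpectedly accepted */
	}
	return true;		/* registration refused, as intended */
}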
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c b/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c
new file mode 100644
index 000000000000..c06f4a41c21a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/unsupp_cong_op_get_info")
+size_t BPF_PROG(unsupp_cong_op_get_info, struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
+{
+ return 0;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops unsupp_cong_op = {
+ .get_info = (void *)unsupp_cong_op_get_info,
+ .name = "bpf_unsupp_op",
+};
diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
new file mode 100644
index 000000000000..43447704cf0e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define USEC_PER_SEC 1000000UL
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+ return (struct tcp_sock *)sk;
+}
+
+SEC("struct_ops/write_sk_pacing_init")
+void BPF_PROG(write_sk_pacing_init, struct sock *sk)
+{
+#ifdef ENABLE_ATOMICS_TESTS
+ __sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
+ SK_PACING_NEEDED);
+#else
+ sk->sk_pacing_status = SK_PACING_NEEDED;
+#endif
+}
+
+SEC("struct_ops/write_sk_pacing_cong_control")
+void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
+ const struct rate_sample *rs)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long rate =
+ ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
+ (tp->srtt_us ?: 1U << 3);
+ sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
+}
+
+SEC("struct_ops/write_sk_pacing_ssthresh")
+__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_ssthresh;
+}
+
+SEC("struct_ops/write_sk_pacing_undo_cwnd")
+__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops write_sk_pacing = {
+ .init = (void *)write_sk_pacing_init,
+ .cong_control = (void *)write_sk_pacing_cong_control,
+ .ssthresh = (void *)write_sk_pacing_ssthresh,
+ .undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
+ .name = "bpf_w_sk_pacing",
+};
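
Once the struct_ops map is attached, the congestion control can be selected per socket through the name given above. A sketch:

#include <sys/socket.h>
#include <netinet/tcp.h>

static int use_bpf_ca(int fd)
{
	const char ca[] = "bpf_w_sk_pacing";

	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, sizeof(ca) - 1);
}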
diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c
index 8056a4c6d918..a1e45fec8938 100644
--- a/tools/testing/selftests/bpf/progs/test_attach_probe.c
+++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c
@@ -1,42 +1,155 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_misc.h"
int kprobe_res = 0;
+int kprobe2_res = 0;
int kretprobe_res = 0;
+int kretprobe2_res = 0;
int uprobe_res = 0;
int uretprobe_res = 0;
+int uprobe_byname_res = 0;
+int uretprobe_byname_res = 0;
+int uprobe_byname2_res = 0;
+int uretprobe_byname2_res = 0;
+int uprobe_byname3_sleepable_res = 0;
+int uprobe_byname3_res = 0;
+int uretprobe_byname3_sleepable_res = 0;
+int uretprobe_byname3_res = 0;
+void *user_ptr = 0;
-SEC("kprobe/sys_nanosleep")
+SEC("kprobe")
int handle_kprobe(struct pt_regs *ctx)
{
kprobe_res = 1;
return 0;
}
-SEC("kretprobe/sys_nanosleep")
-int BPF_KRETPROBE(handle_kretprobe)
+SEC("ksyscall/nanosleep")
+int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem)
+{
+ kprobe2_res = 11;
+ return 0;
+}
+
+/*
+ * This program will be manually made sleepable on the userspace side
+ * and should thus be unattachable.
+ */
+SEC("kprobe/" SYS_PREFIX "sys_nanosleep")
+int handle_kprobe_sleepable(struct pt_regs *ctx)
+{
+ kprobe_res = 2;
+ return 0;
+}
+
+SEC("kretprobe")
+int handle_kretprobe(struct pt_regs *ctx)
{
kretprobe_res = 2;
return 0;
}
-SEC("uprobe/trigger_func")
+SEC("kretsyscall/nanosleep")
+int BPF_KRETPROBE(handle_kretprobe_auto, int ret)
+{
+ kretprobe2_res = 22;
+ return ret;
+}
+
+SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
uprobe_res = 3;
return 0;
}
-SEC("uretprobe/trigger_func")
+SEC("uretprobe")
int handle_uretprobe(struct pt_regs *ctx)
{
uretprobe_res = 4;
return 0;
}
+SEC("uprobe")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_res = 5;
+ return 0;
+}
+
+/* Use the auto-attach section format; libbpf attaches it without extra opts. */
+SEC("uretprobe//proc/self/exe:trigger_func2")
+int handle_uretprobe_byname(struct pt_regs *ctx)
+{
+ uretprobe_byname_res = 6;
+ return 0;
+}
+
+SEC("uprobe")
+int handle_uprobe_byname2(struct pt_regs *ctx)
+{
+ unsigned int size = PT_REGS_PARM1(ctx);
+
+ /* verify malloc size */
+ if (size == 1)
+ uprobe_byname2_res = 7;
+ return 0;
+}
+
+SEC("uretprobe")
+int handle_uretprobe_byname2(struct pt_regs *ctx)
+{
+ uretprobe_byname2_res = 8;
+ return 0;
+}
+
+static __always_inline bool verify_sleepable_user_copy(void)
+{
+ char data[9];
+
+ bpf_copy_from_user(data, sizeof(data), user_ptr);
+ return bpf_strncmp(data, sizeof(data), "test_data") == 0;
+}
+
+SEC("uprobe.s//proc/self/exe:trigger_func3")
+int handle_uprobe_byname3_sleepable(struct pt_regs *ctx)
+{
+ if (verify_sleepable_user_copy())
+ uprobe_byname3_sleepable_res = 9;
+ return 0;
+}
+
+/*
+ * Same target as the uprobe.s above, to force sleepable and non-sleepable
+ * programs into the same bpf_prog_array.
+ */
+SEC("uprobe//proc/self/exe:trigger_func3")
+int handle_uprobe_byname3(struct pt_regs *ctx)
+{
+ uprobe_byname3_res = 10;
+ return 0;
+}
+
+SEC("uretprobe.s//proc/self/exe:trigger_func3")
+int handle_uretprobe_byname3_sleepable(struct pt_regs *ctx)
+{
+ if (verify_sleepable_user_copy())
+ uretprobe_byname3_sleepable_res = 11;
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:trigger_func3")
+int handle_uretprobe_byname3(struct pt_regs *ctx)
+{
+ uretprobe_byname3_res = 12;
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
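
The target-less SEC("kprobe")/SEC("uprobe") programs above carry no attach point in their section names, so libbpf skips them during auto-attach and the test attaches them manually. A sketch for the uprobe case, assuming libbpf's bpf_uprobe_opts with its func_name field (path, function name, and skeleton handle are illustrative):

LIBBPF_OPTS(bpf_uprobe_opts, opts,
	.func_name = "trigger_func",
	.retprobe = false,
);

link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
				       0 /* self pid */, "/proc/self/exe",
				       0 /* func offset */, &opts);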
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
index 2d3a7710e2ce..22d0ac8709b4 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_cookie.c
@@ -4,18 +4,23 @@
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <errno.h>
int my_tid;
-int kprobe_res;
-int kprobe_multi_res;
-int kretprobe_res;
-int uprobe_res;
-int uretprobe_res;
-int tp_res;
-int pe_res;
+__u64 kprobe_res;
+__u64 kprobe_multi_res;
+__u64 kretprobe_res;
+__u64 uprobe_res;
+__u64 uretprobe_res;
+__u64 tp_res;
+__u64 pe_res;
+__u64 fentry_res;
+__u64 fexit_res;
+__u64 fmod_ret_res;
+__u64 lsm_res;
-static void update(void *ctx, int *res)
+static void update(void *ctx, __u64 *res)
{
if (my_tid != (u32)bpf_get_current_pid_tgid())
return;
@@ -37,14 +42,14 @@ int handle_kretprobe(struct pt_regs *ctx)
return 0;
}
-SEC("uprobe/trigger_func")
+SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
update(ctx, &uprobe_res);
return 0;
}
-SEC("uretprobe/trigger_func")
+SEC("uretprobe")
int handle_uretprobe(struct pt_regs *ctx)
{
update(ctx, &uretprobe_res);
@@ -82,4 +87,35 @@ int handle_pe(struct pt_regs *ctx)
return 0;
}
+SEC("fentry/bpf_fentry_test1")
+int BPF_PROG(fentry_test1, int a)
+{
+ update(ctx, &fentry_res);
+ return 0;
+}
+
+SEC("fexit/bpf_fentry_test1")
+int BPF_PROG(fexit_test1, int a, int ret)
+{
+ update(ctx, &fexit_res);
+ return 0;
+}
+
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
+{
+ update(ctx, &fmod_ret_res);
+ return 1234;
+}
+
+SEC("lsm/file_mprotect")
+int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot, int ret)
+{
+ if (my_tid != (u32)bpf_get_current_pid_tgid())
+ return ret;
+ update(ctx, &lsm_res);
+ return -EPERM;
+}
+
char _license[] SEC("license") = "GPL";
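
The new fentry/fexit/fmod_ret/lsm results are driven by per-attachment cookies, which the programs fold into their result variables (via bpf_get_attach_cookie() in the unchanged update() body). A sketch of attaching a tracing program with a cookie, assuming libbpf's bpf_trace_opts and a skeleton handle:

LIBBPF_OPTS(bpf_trace_opts, opts, .cookie = 0x10);

link = bpf_program__attach_trace_opts(skel->progs.fentry_test1, &opts);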
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
index f00a9731930e..196cd8dfe42a 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
@@ -8,6 +8,8 @@
#define EINVAL 22
#define ENOENT 2
+extern unsigned long CONFIG_HZ __kconfig;
+
int test_einval_bpf_tuple = 0;
int test_einval_reserved = 0;
int test_einval_netns_id = 0;
@@ -16,6 +18,11 @@ int test_eproto_l4proto = 0;
int test_enonet_netns_id = 0;
int test_enoent_lookup = 0;
int test_eafnosupport = 0;
+int test_alloc_entry = -EINVAL;
+int test_insert_entry = -EAFNOSUPPORT;
+int test_succ_lookup = -ENOENT;
+u32 test_delta_timeout = 0;
+u32 test_status = 0;
struct nf_conn;
@@ -26,31 +33,44 @@ struct bpf_ct_opts___local {
u8 reserved[3];
} __attribute__((preserve_access_index));
+struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
static __always_inline void
-nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
- struct bpf_ct_opts___local *, u32),
+nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32),
+ struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32),
void *ctx)
{
struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
struct bpf_sock_tuple bpf_tuple;
struct nf_conn *ct;
+ int err;
__builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
- ct = func(ctx, NULL, 0, &opts_def, sizeof(opts_def));
+ ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
if (ct)
bpf_ct_release(ct);
else
test_einval_bpf_tuple = opts_def.error;
opts_def.reserved[0] = 1;
- ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
opts_def.reserved[0] = 0;
opts_def.l4proto = IPPROTO_TCP;
if (ct)
@@ -59,21 +79,24 @@ nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
test_einval_reserved = opts_def.error;
opts_def.netns_id = -2;
- ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
opts_def.netns_id = -1;
if (ct)
bpf_ct_release(ct);
else
test_einval_netns_id = opts_def.error;
- ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def) - 1);
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def) - 1);
if (ct)
bpf_ct_release(ct);
else
test_einval_len_opts = opts_def.error;
opts_def.l4proto = IPPROTO_ICMP;
- ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
opts_def.l4proto = IPPROTO_TCP;
if (ct)
bpf_ct_release(ct);
@@ -81,37 +104,75 @@ nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
test_eproto_l4proto = opts_def.error;
opts_def.netns_id = 0xf00f;
- ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
opts_def.netns_id = -1;
if (ct)
bpf_ct_release(ct);
else
test_enonet_netns_id = opts_def.error;
- ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
if (ct)
bpf_ct_release(ct);
else
test_enoent_lookup = opts_def.error;
- ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def, sizeof(opts_def));
+ ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
+ sizeof(opts_def));
if (ct)
bpf_ct_release(ct);
else
test_eafnosupport = opts_def.error;
+
+ bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+ bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+ bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+ bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+ ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+ sizeof(opts_def));
+ if (ct) {
+ struct nf_conn *ct_ins;
+
+ bpf_ct_set_timeout(ct, 10000);
+ bpf_ct_set_status(ct, IPS_CONFIRMED);
+
+ ct_ins = bpf_ct_insert_entry(ct);
+ if (ct_ins) {
+ struct nf_conn *ct_lk;
+
+ ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+ &opts_def, sizeof(opts_def));
+ if (ct_lk) {
+ /* update ct entry timeout */
+ bpf_ct_change_timeout(ct_lk, 10000);
+ test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
+ test_delta_timeout /= CONFIG_HZ;
+ test_status = IPS_SEEN_REPLY;
+ bpf_ct_change_status(ct_lk, IPS_SEEN_REPLY);
+ bpf_ct_release(ct_lk);
+ test_succ_lookup = 0;
+ }
+ bpf_ct_release(ct_ins);
+ test_insert_entry = 0;
+ }
+ test_alloc_entry = 0;
+ }
}
SEC("xdp")
int nf_xdp_ct_test(struct xdp_md *ctx)
{
- nf_ct_test((void *)bpf_xdp_ct_lookup, ctx);
+ nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
return 0;
}
SEC("tc")
int nf_skb_ct_test(struct __sk_buff *ctx)
{
- nf_ct_test((void *)bpf_skb_ct_lookup, ctx);
+ nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
new file mode 100644
index 000000000000..bf79af15c808
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct nf_conn;
+
+struct bpf_ct_opts___local {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 reserved[3];
+} __attribute__((preserve_access_index));
+
+struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+ struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
+void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
+
+SEC("?tc")
+int alloc_release(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_release(ct);
+ return 0;
+}
+
+SEC("?tc")
+int insert_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ return 0;
+}
+
+SEC("?tc")
+int lookup_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_insert_entry(ct);
+ return 0;
+}
+
+SEC("?tc")
+int set_timeout_after_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ bpf_ct_set_timeout(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int set_status_after_insert(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ ct = bpf_ct_insert_entry(ct);
+ if (!ct)
+ return 0;
+ bpf_ct_set_status(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int change_timeout_after_alloc(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_change_timeout(ct, 0);
+ return 0;
+}
+
+SEC("?tc")
+int change_status_after_alloc(struct __sk_buff *ctx)
+{
+ struct bpf_ct_opts___local opts = {};
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+
+ ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
+ if (!ct)
+ return 0;
+ bpf_ct_change_status(ct, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
deleted file mode 100644
index 07c94df13660..000000000000
--- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018 Facebook */
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include "bpf_legacy.h"
-
-struct ipv_counts {
- unsigned int v4;
- unsigned int v6;
-};
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-struct bpf_map_def SEC("maps") btf_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct ipv_counts),
- .max_entries = 4,
-};
-#pragma GCC diagnostic pop
-
-BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
-
-__attribute__((noinline))
-int test_long_fname_2(void)
-{
- struct ipv_counts *counts;
- int key = 0;
-
- counts = bpf_map_lookup_elem(&btf_map, &key);
- if (!counts)
- return 0;
-
- counts->v6++;
-
- return 0;
-}
-
-__attribute__((noinline))
-int test_long_fname_1(void)
-{
- return test_long_fname_2();
-}
-
-SEC("dummy_tracepoint")
-int _dummy_tracepoint(void *arg)
-{
- return test_long_fname_1();
-}
-
-char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
index 762671a2e90c..251854a041b5 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
@@ -9,19 +9,6 @@ struct ipv_counts {
unsigned int v6;
};
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-/* just to validate we can handle maps in multiple sections */
-struct bpf_map_def SEC("maps") btf_map_legacy = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(long long),
- .max_entries = 4,
-};
-#pragma GCC diagnostic pop
-
-BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
-
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 4);
@@ -41,11 +28,6 @@ int test_long_fname_2(void)
counts->v6++;
- /* just verify we can reference both maps */
- counts = bpf_map_lookup_elem(&btf_map_legacy, &key);
- if (!counts)
- return 0;
-
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_extern.c b/tools/testing/selftests/bpf/progs/test_core_extern.c
index 3ac3603ad53d..a3c7c1042f35 100644
--- a/tools/testing/selftests/bpf/progs/test_core_extern.c
+++ b/tools/testing/selftests/bpf/progs/test_core_extern.c
@@ -11,6 +11,7 @@
static int (*bpf_missing_helper)(const void *arg1, int arg2) = (void *) 999;
extern int LINUX_KERNEL_VERSION __kconfig;
+extern int LINUX_UNKNOWN_VIRTUAL_EXTERN __kconfig __weak;
extern bool CONFIG_BPF_SYSCALL __kconfig; /* strong */
extern enum libbpf_tristate CONFIG_TRISTATE __kconfig __weak;
extern bool CONFIG_BOOL __kconfig __weak;
@@ -22,6 +23,7 @@ extern const char CONFIG_STR[8] __kconfig __weak;
extern uint64_t CONFIG_MISSING __kconfig __weak;
uint64_t kern_ver = -1;
+uint64_t unkn_virt_val = -1;
uint64_t bpf_syscall = -1;
uint64_t tristate_val = -1;
uint64_t bool_val = -1;
@@ -38,6 +40,7 @@ int handle_sys_enter(struct pt_regs *ctx)
int i;
kern_ver = LINUX_KERNEL_VERSION;
+ unkn_virt_val = LINUX_UNKNOWN_VIRTUAL_EXTERN;
bpf_syscall = CONFIG_BPF_SYSCALL;
tristate_val = CONFIG_TRISTATE;
bool_val = CONFIG_BOOL;
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c b/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c
new file mode 100644
index 000000000000..63147fbfae6e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+ bool skip;
+} data = {};
+
+enum named_unsigned_enum64 {
+ UNSIGNED_ENUM64_VAL1 = 0x1ffffffffULL,
+ UNSIGNED_ENUM64_VAL2 = 0x2ffffffffULL,
+ UNSIGNED_ENUM64_VAL3 = 0x3ffffffffULL,
+};
+
+enum named_signed_enum64 {
+ SIGNED_ENUM64_VAL1 = 0x1ffffffffLL,
+ SIGNED_ENUM64_VAL2 = -2,
+ SIGNED_ENUM64_VAL3 = 0x3ffffffffLL,
+};
+
+struct core_reloc_enum64val_output {
+ bool unsigned_val1_exists;
+ bool unsigned_val2_exists;
+ bool unsigned_val3_exists;
+ bool signed_val1_exists;
+ bool signed_val2_exists;
+ bool signed_val3_exists;
+
+ long unsigned_val1;
+ long unsigned_val2;
+ long signed_val1;
+ long signed_val2;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_enum64val(void *ctx)
+{
+#if __clang_major__ >= 15
+ struct core_reloc_enum64val_output *out = (void *)&data.out;
+ enum named_unsigned_enum64 named_unsigned = 0;
+ enum named_signed_enum64 named_signed = 0;
+
+ out->unsigned_val1_exists = bpf_core_enum_value_exists(named_unsigned, UNSIGNED_ENUM64_VAL1);
+ out->unsigned_val2_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL2);
+ out->unsigned_val3_exists = bpf_core_enum_value_exists(enum named_unsigned_enum64, UNSIGNED_ENUM64_VAL3);
+ out->signed_val1_exists = bpf_core_enum_value_exists(named_signed, SIGNED_ENUM64_VAL1);
+ out->signed_val2_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL2);
+ out->signed_val3_exists = bpf_core_enum_value_exists(enum named_signed_enum64, SIGNED_ENUM64_VAL3);
+
+ out->unsigned_val1 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL1);
+ out->unsigned_val2 = bpf_core_enum_value(named_unsigned, UNSIGNED_ENUM64_VAL2);
+ out->signed_val1 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL1);
+ out->signed_val2 = bpf_core_enum_value(named_signed, SIGNED_ENUM64_VAL2);
+ /* the UNSIGNED_/SIGNED_ENUM64_VAL3 values are optional */
+
+#else
+ data.skip = true;
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
index 7e45e2bdf6cd..5b8a75097ea3 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_existence.c
@@ -45,35 +45,34 @@ int test_core_existence(void *ctx)
struct core_reloc_existence_output *out = (void *)&data.out;
out->a_exists = bpf_core_field_exists(in->a);
- if (bpf_core_field_exists(in->a))
+ if (bpf_core_field_exists(struct core_reloc_existence, a))
out->a_value = BPF_CORE_READ(in, a);
else
out->a_value = 0xff000001u;
out->b_exists = bpf_core_field_exists(in->b);
- if (bpf_core_field_exists(in->b))
+ if (bpf_core_field_exists(struct core_reloc_existence, b))
out->b_value = BPF_CORE_READ(in, b);
else
out->b_value = 0xff000002u;
out->c_exists = bpf_core_field_exists(in->c);
- if (bpf_core_field_exists(in->c))
+ if (bpf_core_field_exists(struct core_reloc_existence, c))
out->c_value = BPF_CORE_READ(in, c);
else
out->c_value = 0xff000003u;
out->arr_exists = bpf_core_field_exists(in->arr);
- if (bpf_core_field_exists(in->arr))
+ if (bpf_core_field_exists(struct core_reloc_existence, arr))
out->arr_value = BPF_CORE_READ(in, arr[0]);
else
out->arr_value = 0xff000004u;
out->s_exists = bpf_core_field_exists(in->s);
- if (bpf_core_field_exists(in->s))
+ if (bpf_core_field_exists(struct core_reloc_existence, s))
out->s_value = BPF_CORE_READ(in, s.x);
else
out->s_value = 0xff000005u;
return 0;
}
-
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
index 145028b52ad8..a17dd83eae67 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
@@ -21,6 +21,7 @@ struct core_reloc_kernel_output {
/* we have test_progs[-flavor], so cut flavor part */
char comm[sizeof("test_progs")];
int comm_len;
+ bool local_task_struct_matches;
};
struct task_struct {
@@ -30,11 +31,25 @@ struct task_struct {
struct task_struct *group_leader;
};
+struct mm_struct___wrong {
+ int abc_whatever_should_not_exist;
+};
+
+struct task_struct___local {
+ int pid;
+ struct mm_struct___wrong *mm;
+};
+
#define CORE_READ(dst, src) bpf_core_read(dst, sizeof(*(dst)), src)
SEC("raw_tracepoint/sys_enter")
int test_core_kernel(void *ctx)
{
+ /* Support for the BPF_TYPE_MATCHES argument to the
+ * __builtin_preserve_type_info builtin was added at some point during
+ * development of clang 15, and that is what this test requires.
+ */
+#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct task_struct *task = (void *)bpf_get_current_task();
struct core_reloc_kernel_output *out = (void *)&data.out;
uint64_t pid_tgid = bpf_get_current_pid_tgid();
@@ -93,6 +108,10 @@ int test_core_kernel(void *ctx)
group_leader, group_leader, group_leader, group_leader,
comm);
+ out->local_task_struct_matches = bpf_core_type_matches(struct task_struct___local);
+#else
+ data.skip = true;
+#endif
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
index 7b2d576aeea1..5b686053ce42 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
@@ -15,13 +15,21 @@ struct {
struct core_reloc_size_output {
int int_sz;
+ int int_off;
int struct_sz;
+ int struct_off;
int union_sz;
+ int union_off;
int arr_sz;
+ int arr_off;
int arr_elem_sz;
+ int arr_elem_off;
int ptr_sz;
+ int ptr_off;
int enum_sz;
+ int enum_off;
int float_sz;
+ int float_off;
};
struct core_reloc_size {
@@ -41,13 +49,28 @@ int test_core_size(void *ctx)
struct core_reloc_size_output *out = (void *)&data.out;
out->int_sz = bpf_core_field_size(in->int_field);
+ out->int_off = bpf_core_field_offset(in->int_field);
+
out->struct_sz = bpf_core_field_size(in->struct_field);
+ out->struct_off = bpf_core_field_offset(in->struct_field);
+
out->union_sz = bpf_core_field_size(in->union_field);
+ out->union_off = bpf_core_field_offset(in->union_field);
+
out->arr_sz = bpf_core_field_size(in->arr_field);
- out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
- out->ptr_sz = bpf_core_field_size(in->ptr_field);
- out->enum_sz = bpf_core_field_size(in->enum_field);
- out->float_sz = bpf_core_field_size(in->float_field);
+ out->arr_off = bpf_core_field_offset(in->arr_field);
+
+ out->arr_elem_sz = bpf_core_field_size(struct core_reloc_size, arr_field[1]);
+ out->arr_elem_off = bpf_core_field_offset(struct core_reloc_size, arr_field[1]);
+
+ out->ptr_sz = bpf_core_field_size(struct core_reloc_size, ptr_field);
+ out->ptr_off = bpf_core_field_offset(struct core_reloc_size, ptr_field);
+
+ out->enum_sz = bpf_core_field_size(struct core_reloc_size, enum_field);
+ out->enum_off = bpf_core_field_offset(struct core_reloc_size, enum_field);
+
+ out->float_sz = bpf_core_field_size(struct core_reloc_size, float_field);
+ out->float_off = bpf_core_field_offset(struct core_reloc_size, float_field);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
index fb60f8195c53..2edb4df35e6e 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
@@ -19,6 +19,14 @@ struct a_struct {
int x;
};
+struct a_complex_struct {
+ union {
+ struct a_struct *a;
+ void *b;
+ } x;
+ volatile long y;
+};
+
union a_union {
int y;
int z;
@@ -43,6 +51,7 @@ typedef int int_typedef;
typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
typedef void *void_ptr_typedef;
+typedef int *restrict restrict_ptr_typedef;
typedef int (*func_proto_typedef)(long);
@@ -50,6 +59,7 @@ typedef char arr_typedef[20];
struct core_reloc_type_based_output {
bool struct_exists;
+ bool complex_struct_exists;
bool union_exists;
bool enum_exists;
bool typedef_named_struct_exists;
@@ -58,9 +68,24 @@ struct core_reloc_type_based_output {
bool typedef_int_exists;
bool typedef_enum_exists;
bool typedef_void_ptr_exists;
+ bool typedef_restrict_ptr_exists;
bool typedef_func_proto_exists;
bool typedef_arr_exists;
+ bool struct_matches;
+ bool complex_struct_matches;
+ bool union_matches;
+ bool enum_matches;
+ bool typedef_named_struct_matches;
+ bool typedef_anon_struct_matches;
+ bool typedef_struct_ptr_matches;
+ bool typedef_int_matches;
+ bool typedef_enum_matches;
+ bool typedef_void_ptr_matches;
+ bool typedef_restrict_ptr_matches;
+ bool typedef_func_proto_matches;
+ bool typedef_arr_matches;
+
int struct_sz;
int union_sz;
int enum_sz;
@@ -77,10 +102,17 @@ struct core_reloc_type_based_output {
SEC("raw_tracepoint/sys_enter")
int test_core_type_based(void *ctx)
{
-#if __has_builtin(__builtin_preserve_type_info)
+ /* Support for the BPF_TYPE_MATCHES argument to the
+ * __builtin_preserve_type_info builtin was added at some point during
+ * development of clang 15, and that is what this test requires. Parts of
+ * it could run with just __builtin_preserve_type_info (which could be
+ * checked separately), but we gate the whole test on the stricter
+ * requirement.
+ */
+#if __has_builtin(__builtin_preserve_type_info) && __clang_major__ >= 15
struct core_reloc_type_based_output *out = (void *)&data.out;
out->struct_exists = bpf_core_type_exists(struct a_struct);
+ out->complex_struct_exists = bpf_core_type_exists(struct a_complex_struct);
out->union_exists = bpf_core_type_exists(union a_union);
out->enum_exists = bpf_core_type_exists(enum an_enum);
out->typedef_named_struct_exists = bpf_core_type_exists(named_struct_typedef);
@@ -89,9 +121,24 @@ int test_core_type_based(void *ctx)
out->typedef_int_exists = bpf_core_type_exists(int_typedef);
out->typedef_enum_exists = bpf_core_type_exists(enum_typedef);
out->typedef_void_ptr_exists = bpf_core_type_exists(void_ptr_typedef);
+ out->typedef_restrict_ptr_exists = bpf_core_type_exists(restrict_ptr_typedef);
out->typedef_func_proto_exists = bpf_core_type_exists(func_proto_typedef);
out->typedef_arr_exists = bpf_core_type_exists(arr_typedef);
+ out->struct_matches = bpf_core_type_matches(struct a_struct);
+ out->complex_struct_matches = bpf_core_type_matches(struct a_complex_struct);
+ out->union_matches = bpf_core_type_matches(union a_union);
+ out->enum_matches = bpf_core_type_matches(enum an_enum);
+ out->typedef_named_struct_matches = bpf_core_type_matches(named_struct_typedef);
+ out->typedef_anon_struct_matches = bpf_core_type_matches(anon_struct_typedef);
+ out->typedef_struct_ptr_matches = bpf_core_type_matches(struct_ptr_typedef);
+ out->typedef_int_matches = bpf_core_type_matches(int_typedef);
+ out->typedef_enum_matches = bpf_core_type_matches(enum_typedef);
+ out->typedef_void_ptr_matches = bpf_core_type_matches(void_ptr_typedef);
+ out->typedef_restrict_ptr_matches = bpf_core_type_matches(restrict_ptr_typedef);
+ out->typedef_func_proto_matches = bpf_core_type_matches(func_proto_typedef);
+ out->typedef_arr_matches = bpf_core_type_matches(arr_typedef);
+
out->struct_sz = bpf_core_type_size(struct a_struct);
out->union_sz = bpf_core_type_size(union a_union);
out->enum_sz = bpf_core_type_size(enum an_enum);
diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c
new file mode 100644
index 000000000000..2b8b9b8ba018
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_global_func17.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline int foo(int *p)
+{
+ return p ? (*p = 42) : 0;
+}
+
+const volatile int i;
+
+SEC("tc")
+int test_cls(struct __sk_buff *skb)
+{
+ return foo((int *)&i);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
index 68d64c365f90..20ef9d433b97 100644
--- a/tools/testing/selftests/bpf/progs/test_helper_restricted.c
+++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
@@ -56,7 +56,7 @@ static void spin_lock_work(void)
}
}
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int raw_tp_timer(void *ctx)
{
timer_work();
@@ -64,7 +64,7 @@ int raw_tp_timer(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_timer(void *ctx)
{
timer_work();
@@ -72,7 +72,7 @@ int tp_timer(void *ctx)
return 0;
}
-SEC("kprobe/sys_nanosleep")
+SEC("?kprobe/sys_nanosleep")
int kprobe_timer(void *ctx)
{
timer_work();
@@ -80,7 +80,7 @@ int kprobe_timer(void *ctx)
return 0;
}
-SEC("perf_event")
+SEC("?perf_event")
int perf_event_timer(void *ctx)
{
timer_work();
@@ -88,7 +88,7 @@ int perf_event_timer(void *ctx)
return 0;
}
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int raw_tp_spin_lock(void *ctx)
{
spin_lock_work();
@@ -96,7 +96,7 @@ int raw_tp_spin_lock(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_spin_lock(void *ctx)
{
spin_lock_work();
@@ -104,7 +104,7 @@ int tp_spin_lock(void *ctx)
return 0;
}
-SEC("kprobe/sys_nanosleep")
+SEC("?kprobe/sys_nanosleep")
int kprobe_spin_lock(void *ctx)
{
spin_lock_work();
@@ -112,7 +112,7 @@ int kprobe_spin_lock(void *ctx)
return 0;
}
-SEC("perf_event")
+SEC("?perf_event")
int perf_event_spin_lock(void *ctx)
{
spin_lock_work();
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
index 2180c41cd890..a72a5bf3812a 100644
--- a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c
@@ -8,7 +8,7 @@
extern const int bpf_prog_active __ksym; /* int type global var. */
SEC("raw_tp/sys_enter")
-int handler(const void *ctx)
+int handler1(const void *ctx)
{
int *active;
__u32 cpu;
@@ -26,4 +26,20 @@ int handler(const void *ctx)
return 0;
}
+__noinline int write_active(int *p)
+{
+ return p ? (*p = 42) : 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handler2(const void *ctx)
+{
+ int *active;
+ __u32 cpu;
+
+ active = bpf_this_cpu_ptr(&bpf_prog_active);
+ write_active(active);
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index 19e4d2071c60..c8bc0c6947aa 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -218,7 +218,7 @@ static __noinline bool get_packet_dst(struct real_definition **real,
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
- return 0;
+ return false;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c
new file mode 100644
index 000000000000..60450cb0e72e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct task_struct___bad {
+ int pid;
+ int fake_field;
+ void *fake_field_subprog;
+} __attribute__((preserve_access_index));
+
+SEC("?raw_tp/sys_enter")
+int bad_relo(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bpf_core_field_size(t->fake_field);
+}
+
+static __noinline int bad_subprog(void)
+{
+ static struct task_struct___bad *t;
+
+ /* ugliness below is a field offset relocation */
+ return (void *)&t->fake_field_subprog - (void *)t;
+}
+
+SEC("?raw_tp/sys_enter")
+int bad_relo_subprog(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bad_subprog() + bpf_core_field_size(t->pid);
+}
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} existing_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} missing_map SEC(".maps");
+
+SEC("?raw_tp/sys_enter")
+int use_missing_map(const void *ctx)
+{
+ int zero = 0, *value;
+
+ value = bpf_map_lookup_elem(&existing_map, &zero);
+
+ value = bpf_map_lookup_elem(&missing_map, &zero);
+
+ return value != NULL;
+}
+
+char _license[] SEC("license") = "GPL";
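
These programs exist to exercise libbpf's fixed-up verifier logs: each is expected to fail to load, with the log pointing at the bad CO-RE relocation or the missing map. A sketch of capturing that log, assuming a bpftool-generated skeleton named test_log_fixup:

static void capture_fixup_log(struct test_log_fixup *skel)
{
	static char log_buf[64 * 1024];

	bpf_program__set_autoload(skel->progs.bad_relo, true);
	bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, sizeof(log_buf));

	/* Expected to fail; log_buf then carries the annotated verifier log. */
	test_log_fixup__load(skel);
}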
diff --git a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
new file mode 100644
index 000000000000..ca827b1092da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Bytedance */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u64 percpu_array_elem_sum = 0;
+__u64 percpu_hash_elem_sum = 0;
+__u64 percpu_lru_hash_elem_sum = 0;
+const volatile int nr_cpus;
+const volatile int my_pid;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} percpu_array_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u64);
+ __type(value, __u64);
+} percpu_hash_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u64);
+ __type(value, __u64);
+} percpu_lru_hash_map SEC(".maps");
+
+struct read_percpu_elem_ctx {
+ void *map;
+ __u64 sum;
+};
+
+static int read_percpu_elem_callback(__u32 index, struct read_percpu_elem_ctx *ctx)
+{
+ __u64 key = 0;
+ __u64 *value;
+
+ value = bpf_map_lookup_percpu_elem(ctx->map, &key, index);
+ if (value)
+ ctx->sum += *value;
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_getuid")
+int sysenter_getuid(const void *ctx)
+{
+ struct read_percpu_elem_ctx map_ctx;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ map_ctx.map = &percpu_array_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_array_elem_sum = map_ctx.sum;
+
+ map_ctx.map = &percpu_hash_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_hash_elem_sum = map_ctx.sum;
+
+ map_ctx.map = &percpu_lru_hash_map;
+ map_ctx.sum = 0;
+ bpf_loop(nr_cpus, read_percpu_elem_callback, &map_ctx, 0);
+ percpu_lru_hash_elem_sum = map_ctx.sum;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
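
nr_cpus and my_pid are const volatile, i.e. they live in rodata and must be set between skeleton open and load. A sketch using the conventional bpftool skeleton API (names assumed; error handling omitted):

#include <unistd.h>
#include <sys/syscall.h>
#include "test_map_lookup_percpu_elem.skel.h"

static void run_percpu_elem_test(void)
{
	struct test_map_lookup_percpu_elem *skel;

	skel = test_map_lookup_percpu_elem__open();
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();
	skel->rodata->my_pid = getpid();
	test_map_lookup_percpu_elem__load(skel);
	test_map_lookup_percpu_elem__attach(skel);

	syscall(SYS_getuid);	/* triggers the tracepoint program above */
	/* the percpu_*_elem_sum globals now hold the per-CPU sums */
}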
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index 50ce16d02da7..08628afedb77 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -64,7 +64,7 @@ int BPF_PROG(handle_fentry,
__u32 fentry_manual_read_sz = 0;
-SEC("fentry/placeholder")
+SEC("fentry")
int BPF_PROG(handle_fentry_manual,
struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len)
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 0558544e1ff0..5cd7c096f62d 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -14,8 +14,6 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#define barrier() __asm__ __volatile__("": : :"memory")
-
/* llvm will optimize both subprograms into exactly the same BPF assembly
*
* Disassembly of section .text:
diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c
index 702578a5e496..a8e501af9604 100644
--- a/tools/testing/selftests/bpf/progs/test_probe_user.c
+++ b/tools/testing/selftests/bpf/progs/test_probe_user.c
@@ -1,37 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
-
-#include <linux/ptrace.h>
-#include <linux/bpf.h>
-
-#include <netinet/in.h>
-
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
static struct sockaddr_in old;
-SEC("kprobe/" SYS_PREFIX "sys_connect")
-int BPF_KPROBE(handle_sys_connect)
+static int handle_sys_connect_common(struct sockaddr_in *uservaddr)
{
-#if SYSCALL_WRAPPER == 1
- struct pt_regs *real_regs;
-#endif
struct sockaddr_in new;
- void *ptr;
-#if SYSCALL_WRAPPER == 0
- ptr = (void *)PT_REGS_PARM2(ctx);
-#else
- real_regs = (struct pt_regs *)PT_REGS_PARM1(ctx);
- bpf_probe_read_kernel(&ptr, sizeof(ptr), &PT_REGS_PARM2(real_regs));
+ bpf_probe_read_user(&old, sizeof(old), uservaddr);
+ __builtin_memset(&new, 0xab, sizeof(new));
+ bpf_probe_write_user(uservaddr, &new, sizeof(new));
+
+ return 0;
+}
+
+SEC("ksyscall/connect")
+int BPF_KSYSCALL(handle_sys_connect, int fd, struct sockaddr_in *uservaddr,
+ int addrlen)
+{
+ return handle_sys_connect_common(uservaddr);
+}
+
+#if defined(bpf_target_s390)
+#ifndef SYS_CONNECT
+#define SYS_CONNECT 3
#endif
- bpf_probe_read_user(&old, sizeof(old), ptr);
- __builtin_memset(&new, 0xab, sizeof(new));
- bpf_probe_write_user(ptr, &new, sizeof(new));
+SEC("ksyscall/socketcall")
+int BPF_KSYSCALL(handle_sys_socketcall, int call, unsigned long *args)
+{
+ if (call == SYS_CONNECT) {
+ struct sockaddr_in *uservaddr;
+
+ bpf_probe_read_user(&uservaddr, sizeof(uservaddr), &args[1]);
+ return handle_sys_connect_common(uservaddr);
+ }
return 0;
}
+#endif
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
index 197b86546dca..e416e0ce12b7 100644
--- a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c
@@ -15,6 +15,8 @@ struct sample {
struct ringbuf_map {
__uint(type, BPF_MAP_TYPE_RINGBUF);
+ /* libbpf will adjust this to a valid, page-aligned size */
+ __uint(max_entries, 1000);
} ringbuf1 SEC(".maps"),
ringbuf2 SEC(".maps");
diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c
index 02f79356d5eb..98c6493d9b91 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_assign.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c
@@ -89,7 +89,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp)
static inline int
handle_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -121,7 +120,6 @@ assign:
static inline int
handle_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, bool ipv4)
{
- struct bpf_sock_tuple ln = {0};
struct bpf_sock *sk;
const int zero = 0;
size_t tuple_len;
@@ -161,7 +159,7 @@ assign:
SEC("tc")
int bpf_sk_assign_test(struct __sk_buff *skb)
{
- struct bpf_sock_tuple *tuple, ln = {0};
+ struct bpf_sock_tuple *tuple;
bool ipv4 = false;
bool tcp = false;
int tuple_len;
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index 40f161480a2f..b502e5c92e33 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -52,7 +52,7 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
-SEC("tc")
+SEC("?tc")
int sk_lookup_success(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
@@ -78,7 +78,7 @@ int sk_lookup_success(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
-SEC("tc")
+SEC("?tc")
int sk_lookup_success_simple(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -90,7 +90,7 @@ int sk_lookup_success_simple(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_use_after_free(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -105,7 +105,7 @@ int err_use_after_free(struct __sk_buff *skb)
return family;
}
-SEC("tc")
+SEC("?tc")
int err_modify_sk_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -120,7 +120,7 @@ int err_modify_sk_pointer(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -134,7 +134,7 @@ int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -143,7 +143,7 @@ int err_no_release(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_release_twice(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -155,7 +155,7 @@ int err_release_twice(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_release_unchecked(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -172,7 +172,7 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
-SEC("tc")
+SEC("?tc")
int err_no_release_subcall(struct __sk_buff *skb)
{
lookup_no_release(skb);
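[Editor's note] The '?' prefix in SEC("?tc") marks a program as non-autoloading, so one object can carry many mutually exclusive variants while the runner enables only the one it wants before load. A minimal sketch, with hypothetical skeleton names:

	struct test_sk_lookup_kern *skel = test_sk_lookup_kern__open();
	int err;

	/* only this variant will be loaded and verified */
	bpf_program__set_autoload(skel->progs.sk_lookup_success, true);
	err = test_sk_lookup_kern__load(skel);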
diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c
index 1b1187d2967b..1a4e93f6d9df 100644
--- a/tools/testing/selftests/bpf/progs/test_skeleton.c
+++ b/tools/testing/selftests/bpf/progs/test_skeleton.c
@@ -51,6 +51,8 @@ int out_dynarr[4] SEC(".data.dyn") = { 1, 2, 3, 4 };
int read_mostly_var __read_mostly;
int out_mostly_var;
+char huge_arr[16 * 1024 * 1024];
+
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
@@ -71,6 +73,8 @@ int handler(const void *ctx)
out_mostly_var = read_mostly_var;
+ huge_arr[sizeof(huge_arr) - 1] = 123;
+
return 0;
}
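[Editor's note] The 16 MB global lands in the skeleton's memory-mapped .bss, so user space can verify the write without any helper calls. A minimal sketch using the selftests' ASSERT_EQ(), assuming a skeleton handle skel:

	/* last element was set to 123 by the raw_tp handler */
	ASSERT_EQ(skel->bss->huge_arr[sizeof(skel->bss->huge_arr) - 1], 123,
		  "huge_arr");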
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index 6c62bfb8bb6f..0c4426592a26 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
@@ -39,7 +39,7 @@ struct {
__type(value, stack_trace_t);
} stack_amap SEC(".maps");
-SEC("kprobe/urandom_read")
+SEC("kprobe/urandom_read_iter")
int oncpu(struct pt_regs *args)
{
__u32 max_len = sizeof(struct bpf_stack_build_id)
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
index b7c37ca09544..f8e9256cf18d 100644
--- a/tools/testing/selftests/bpf/progs/test_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -89,6 +89,11 @@ int prog2(void *ctx)
return 0;
}
+static int empty_callback(__u32 index, void *data)
+{
+ return 0;
+}
+
/* prog3 has the same section name as prog1 */
SEC("raw_tp/sys_enter")
int prog3(void *ctx)
@@ -98,6 +103,9 @@ int prog3(void *ctx)
if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
return 1;
+ /* test that ld_imm64 with BPF_PSEUDO_FUNC doesn't get blinded */
+ bpf_loop(1, empty_callback, NULL, 0);
+
res3 = sub3(5) + 6; /* (5 + 3 + (4 + 1)) + 6 = 19 */
return 0;
}
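[Editor's note] bpf_loop(nr_loops, callback, ctx, flags) invokes the callback up to nr_loops times and stops early once the callback returns nonzero; the empty callback above exists only so the resulting BPF_PSEUDO_FUNC ld_imm64 is exercised under constant blinding. An illustrative sketch of the helper's contract:

	static int sum_cb(__u32 index, void *data)
	{
		*(__u64 *)data += index;
		return 0;	/* 0 = keep looping, nonzero = break */
	}

	SEC("raw_tp/sys_enter")
	int loop_demo(void *ctx)
	{
		__u64 sum = 0;

		bpf_loop(10, sum_cb, &sum, 0);	/* sum ends up 0+1+...+9 = 45 */
		return 0;
	}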
diff --git a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
index e6cb09259408..1926facba122 100644
--- a/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
+++ b/tools/testing/selftests/bpf/progs/test_task_pt_regs.c
@@ -14,7 +14,7 @@ char current_regs[PT_REGS_SIZE] = {};
char ctx_regs[PT_REGS_SIZE] = {};
int uprobe_res = 0;
-SEC("uprobe/trigger_func")
+SEC("uprobe")
int handle_uprobe(struct pt_regs *ctx)
{
struct task_struct *current;
diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
index 06f300d06dbd..b596479a9ebe 100644
--- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c
+++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c
@@ -11,6 +11,8 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <sys/socket.h>
@@ -115,6 +117,19 @@ static bool bpf_fwd(void)
return test < TCP_IP4_RT_FWD;
}
+static __u8 get_proto(void)
+{
+ switch (test) {
+ case UDP_IP4:
+ case UDP_IP6:
+ case UDP_IP4_RT_FWD:
+ case UDP_IP6_RT_FWD:
+ return IPPROTO_UDP;
+ default:
+ return IPPROTO_TCP;
+ }
+}
+
/* -1: parse error: TC_ACT_SHOT
* 0: not testing traffic: TC_ACT_OK
* >0: first byte is the inet_proto, second byte has the netns
@@ -122,11 +137,16 @@ static bool bpf_fwd(void)
*/
static int skb_get_type(struct __sk_buff *skb)
{
+ __u16 dst_ns_port = __bpf_htons(50000 + test);
void *data_end = ctx_ptr(skb->data_end);
void *data = ctx_ptr(skb->data);
__u8 inet_proto = 0, ns = 0;
struct ipv6hdr *ip6h;
+ __u16 sport, dport;
struct iphdr *iph;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ void *trans;
switch (skb->protocol) {
case __bpf_htons(ETH_P_IP):
@@ -138,6 +158,7 @@ static int skb_get_type(struct __sk_buff *skb)
else if (iph->saddr == ip4_dst)
ns = DST_NS;
inet_proto = iph->protocol;
+ trans = iph + 1;
break;
case __bpf_htons(ETH_P_IPV6):
ip6h = data + sizeof(struct ethhdr);
@@ -148,15 +169,43 @@ static int skb_get_type(struct __sk_buff *skb)
else if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_dst))
ns = DST_NS;
inet_proto = ip6h->nexthdr;
+ trans = ip6h + 1;
break;
default:
return 0;
}
- if ((inet_proto != IPPROTO_TCP && inet_proto != IPPROTO_UDP) || !ns)
+ /* Ignore the skb if it is not from src_ns/dst_ns or does not
+ * carry the protocol under test.
+ */
+ if (!ns || inet_proto != get_proto())
return 0;
- return (ns << 8 | inet_proto);
+ switch (inet_proto) {
+ case IPPROTO_TCP:
+ th = trans;
+ if (th + 1 > data_end)
+ return -1;
+ sport = th->source;
+ dport = th->dest;
+ break;
+ case IPPROTO_UDP:
+ uh = trans;
+ if (uh + 1 > data_end)
+ return -1;
+ sport = uh->source;
+ dport = uh->dest;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The skb belongs to the traffic under test */
+ if ((ns == SRC_NS && dport == dst_ns_port) ||
+ (ns == DST_NS && sport == dst_ns_port))
+ return (ns << 8 | inet_proto);
+
+ return 0;
}
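[Editor's note] Callers decode skb_get_type()'s packed return value as documented above: the low byte carries the inet protocol, the next byte the namespace. A minimal sketch:

	int ret = skb_get_type(skb);

	if (ret > 0) {
		__u8 ns = ret >> 8;		/* SRC_NS or DST_NS */
		__u8 inet_proto = ret & 0xff;	/* IPPROTO_TCP or IPPROTO_UDP */
		/* ... */
	}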
/* format: direction@iface@netns
diff --git a/tools/testing/selftests/bpf/progs/test_trampoline_count.c b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
index f030e469d05b..7765720da7d5 100644
--- a/tools/testing/selftests/bpf/progs/test_trampoline_count.c
+++ b/tools/testing/selftests/bpf/progs/test_trampoline_count.c
@@ -1,20 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
-#include <stdbool.h>
-#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-struct task_struct;
+SEC("fentry/bpf_modify_return_test")
+int BPF_PROG(fentry_test, int a, int *b)
+{
+ return 0;
+}
-SEC("fentry/__set_task_comm")
-int BPF_PROG(prog1, struct task_struct *tsk, const char *buf, bool exec)
+SEC("fmod_ret/bpf_modify_return_test")
+int BPF_PROG(fmod_ret_test, int a, int *b, int ret)
{
return 0;
}
-SEC("fexit/__set_task_comm")
-int BPF_PROG(prog2, struct task_struct *tsk, const char *buf, bool exec)
+SEC("fexit/bpf_modify_return_test")
+int BPF_PROG(fexit_test, int a, int *b, int ret)
{
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
index ef0dde83b85a..df0673c4ecbe 100644
--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
@@ -14,17 +14,23 @@
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/icmp.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/pkt_cls.h>
#include <linux/erspan.h>
+#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-#define ERROR(ret) do {\
- char fmt[] = "ERROR line:%d ret:%d\n";\
- bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \
- } while (0)
+#define log_err(__ret) bpf_printk("ERROR line:%d ret:%d\n", __LINE__, __ret)
+
+#define VXLAN_UDP_PORT 4789
+
+/* The only IPv4 address assigned to veth1: 172.16.1.200 */
+#define ASSIGNED_ADDR_VETH1 0xac1001c8
struct geneve_opt {
__be16 opt_class;
@@ -36,12 +42,24 @@ struct geneve_opt {
__u8 opt_data[8]; /* hard-coded to 8 bytes */
};
+struct vxlanhdr {
+ __be32 vx_flags;
+ __be32 vx_vni;
+} __attribute__((packed));
+
struct vxlan_metadata {
__u32 gbp;
};
-SEC("gre_set_tunnel")
-int _gre_set_tunnel(struct __sk_buff *skb)
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} local_ip_map SEC(".maps");
+
+SEC("tc")
+int gre_set_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
@@ -55,32 +73,31 @@ int _gre_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX | BPF_F_SEQ_NUMBER);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("gre_get_tunnel")
-int _gre_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int gre_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "key %d remote ip 0x%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), key.tunnel_id, key.remote_ipv4);
+ bpf_printk("key %d remote ip 0x%x\n", key.tunnel_id, key.remote_ipv4);
return TC_ACT_OK;
}
-SEC("ip6gretap_set_tunnel")
-int _ip6gretap_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6gretap_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
int ret;
@@ -96,35 +113,34 @@ int _ip6gretap_set_tunnel(struct __sk_buff *skb)
BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
BPF_F_SEQ_NUMBER);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6gretap_get_tunnel")
-int _ip6gretap_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6gretap_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip6 ::%x label %x\n";
struct bpf_tunnel_key key;
int ret;
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+ bpf_printk("key %d remote ip6 ::%x label %x\n",
+ key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
return TC_ACT_OK;
}
-SEC("erspan_set_tunnel")
-int _erspan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
@@ -139,7 +155,7 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -159,17 +175,16 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("erspan_get_tunnel")
-int _erspan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int erspan_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip 0x%x erspan version %d\n";
struct bpf_tunnel_key key;
struct erspan_metadata md;
__u32 index;
@@ -177,38 +192,34 @@ int _erspan_get_tunnel(struct __sk_buff *skb)
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, md.version);
+ bpf_printk("key %d remote ip 0x%x erspan version %d\n",
+ key.tunnel_id, key.remote_ipv4, md.version);
#ifdef ERSPAN_V1
- char fmt2[] = "\tindex %x\n";
-
index = bpf_ntohl(md.u.index);
- bpf_trace_printk(fmt2, sizeof(fmt2), index);
+ bpf_printk("\tindex %x\n", index);
#else
- char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
-
- bpf_trace_printk(fmt2, sizeof(fmt2),
- md.u.md2.dir,
- (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
- bpf_ntohl(md.u.md2.timestamp));
+ bpf_printk("\tdirection %d hwid %x timestamp %u\n",
+ md.u.md2.dir,
+ (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
+ bpf_ntohl(md.u.md2.timestamp));
#endif
return TC_ACT_OK;
}
-SEC("ip4ip6erspan_set_tunnel")
-int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct erspan_metadata md;
@@ -223,7 +234,7 @@ int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -244,17 +255,16 @@ int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip4ip6erspan_get_tunnel")
-int _ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "ip6erspan get key %d remote ip6 ::%x erspan version %d\n";
struct bpf_tunnel_key key;
struct erspan_metadata md;
__u32 index;
@@ -263,44 +273,88 @@ int _ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, md.version);
+ bpf_printk("ip6erspan get key %d remote ip6 ::%x erspan version %d\n",
+ key.tunnel_id, key.remote_ipv4, md.version);
#ifdef ERSPAN_V1
- char fmt2[] = "\tindex %x\n";
-
index = bpf_ntohl(md.u.index);
- bpf_trace_printk(fmt2, sizeof(fmt2), index);
+ bpf_printk("\tindex %x\n", index);
#else
- char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
-
- bpf_trace_printk(fmt2, sizeof(fmt2),
- md.u.md2.dir,
- (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
- bpf_ntohl(md.u.md2.timestamp));
+ bpf_printk("\tdirection %d hwid %x timestamp %u\n",
+ md.u.md2.dir,
+ (md.u.md2.hwid_upper << 4) + md.u.md2.hwid,
+ bpf_ntohl(md.u.md2.timestamp));
#endif
return TC_ACT_OK;
}
-SEC("vxlan_set_tunnel")
-int _vxlan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int vxlan_set_tunnel_dst(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ struct vxlan_metadata md;
+ __u32 index = 0;
+ __u32 *local_ip = NULL;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(-1); /* ret is not set yet at this point */
+ return TC_ACT_SHOT;
+ }
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv4 = 0xac100164; /* 172.16.1.100 */
+ key.remote_ipv4 = *local_ip;
+ key.tunnel_id = 2;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_ZERO_CSUM_TX);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
+ ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int vxlan_set_tunnel_src(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct vxlan_metadata md;
+ __u32 index = 0;
+ __u32 *local_ip = NULL;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(-1); /* ret is not set yet at this point */
+ return TC_ACT_SHOT;
+ }
__builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv4 = *local_ip;
key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
key.tunnel_id = 2;
key.tunnel_tos = 0;
@@ -309,53 +363,154 @@ int _vxlan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
md.gbp = 0x800FF; /* Set VXLAN Group Policy extension */
ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("vxlan_get_tunnel")
-int _vxlan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int vxlan_get_tunnel_src(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct vxlan_metadata md;
- char fmt[] = "key %d remote ip 0x%x vxlan gbp 0x%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, md.gbp);
+ if (key.local_ipv4 != ASSIGNED_ADDR_VETH1 || md.gbp != 0x800FF) {
+ bpf_printk("vxlan key %d local ip 0x%x remote ip 0x%x gbp 0x%x\n",
+ key.tunnel_id, key.local_ipv4,
+ key.remote_ipv4, md.gbp);
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
return TC_ACT_OK;
}
-SEC("ip6vxlan_set_tunnel")
-int _ip6vxlan_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int veth_set_outer_dst(struct __sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)(long)skb->data;
+ __u32 assigned_ip = bpf_htonl(ASSIGNED_ADDR_VETH1);
+ void *data_end = (void *)(long)skb->data_end;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ int ret = 0;
+ __s64 csum;
+
+ if ((void *)eth + sizeof(*eth) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ if (eth->h_proto != bpf_htons(ETH_P_IP))
+ return TC_ACT_OK;
+
+ iph = (struct iphdr *)(eth + 1);
+ if ((void *)iph + sizeof(*iph) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (iph->protocol != IPPROTO_UDP)
+ return TC_ACT_OK;
+
+ udph = (struct udphdr *)(iph + 1);
+ if ((void *)udph + sizeof(*udph) > data_end) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (udph->dest != bpf_htons(VXLAN_UDP_PORT))
+ return TC_ACT_OK;
+
+ if (iph->daddr != assigned_ip) {
+ csum = bpf_csum_diff(&iph->daddr, sizeof(__u32), &assigned_ip,
+ sizeof(__u32), 0);
+ if (bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
+ &assigned_ip, sizeof(__u32), 0) < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ if (bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
+ 0, csum, 0) < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+ bpf_skb_change_type(skb, PACKET_HOST);
+ }
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int ip6vxlan_set_tunnel_dst(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
int ret;
+ __u32 index = 0;
+ __u32 *local_ip;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(-1); /* ret is not set yet at this point */
+ return TC_ACT_SHOT;
+ }
__builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv6[3] = bpf_htonl(0x11); /* ::11 */
+ key.remote_ipv6[3] = bpf_htonl(*local_ip);
+ key.tunnel_id = 22;
+ key.tunnel_tos = 0;
+ key.tunnel_ttl = 64;
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+ BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("tc")
+int ip6vxlan_set_tunnel_src(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key;
+ int ret;
+ __u32 index = 0;
+ __u32 *local_ip;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(-1); /* ret is not set yet at this point */
+ return TC_ACT_SHOT;
+ }
+
+ __builtin_memset(&key, 0x0, sizeof(key));
+ key.local_ipv6[3] = bpf_htonl(*local_ip);
key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
key.tunnel_id = 22;
key.tunnel_tos = 0;
@@ -364,35 +519,48 @@ int _ip6vxlan_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6vxlan_get_tunnel")
-int _ip6vxlan_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6vxlan_get_tunnel_src(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip6 ::%x label %x\n";
struct bpf_tunnel_key key;
int ret;
+ __u32 index = 0;
+ __u32 *local_ip;
+
+ local_ip = bpf_map_lookup_elem(&local_ip_map, &index);
+ if (!local_ip) {
+ log_err(-1); /* ret is not set yet at this point */
+ return TC_ACT_SHOT;
+ }
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+ if (bpf_ntohl(key.local_ipv6[3]) != *local_ip) {
+ bpf_printk("ip6vxlan key %d local ip6 ::%x remote ip6 ::%x label 0x%x\n",
+ key.tunnel_id, bpf_ntohl(key.local_ipv6[3]),
+ bpf_ntohl(key.remote_ipv6[3]), key.tunnel_label);
+ bpf_printk("local_ip 0x%x\n", *local_ip);
+ log_err(ret);
+ return TC_ACT_SHOT;
+ }
return TC_ACT_OK;
}
-SEC("geneve_set_tunnel")
-int _geneve_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int geneve_set_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
@@ -416,30 +584,29 @@ int _geneve_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_ZERO_CSUM_TX);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("geneve_get_tunnel")
-int _geneve_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int geneve_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
struct geneve_opt gopt;
- char fmt[] = "key %d remote ip 0x%x geneve class 0x%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -447,13 +614,13 @@ int _geneve_get_tunnel(struct __sk_buff *skb)
if (ret < 0)
gopt.opt_class = 0;
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+ bpf_printk("key %d remote ip 0x%x geneve class 0x%x\n",
+ key.tunnel_id, key.remote_ipv4, gopt.opt_class);
return TC_ACT_OK;
}
-SEC("ip6geneve_set_tunnel")
-int _ip6geneve_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6geneve_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key;
struct geneve_opt gopt;
@@ -468,7 +635,7 @@ int _ip6geneve_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -483,17 +650,16 @@ int _ip6geneve_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_opt(skb, &gopt, sizeof(gopt));
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6geneve_get_tunnel")
-int _ip6geneve_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6geneve_get_tunnel(struct __sk_buff *skb)
{
- char fmt[] = "key %d remote ip 0x%x geneve class 0x%x\n";
struct bpf_tunnel_key key;
struct geneve_opt gopt;
int ret;
@@ -501,7 +667,7 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
@@ -509,14 +675,14 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
if (ret < 0)
gopt.opt_class = 0;
- bpf_trace_printk(fmt, sizeof(fmt),
- key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+ bpf_printk("key %d remote ip 0x%x geneve class 0x%x\n",
+ key.tunnel_id, key.remote_ipv4, gopt.opt_class);
return TC_ACT_OK;
}
-SEC("ipip_set_tunnel")
-int _ipip_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
@@ -526,7 +692,7 @@ int _ipip_set_tunnel(struct __sk_buff *skb)
/* single length check */
if (data + sizeof(*iph) > data_end) {
- ERROR(1);
+ log_err(1);
return TC_ACT_SHOT;
}
@@ -537,32 +703,31 @@ int _ipip_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ipip_get_tunnel")
-int _ipip_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "remote ip 0x%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), key.remote_ipv4);
+ bpf_printk("remote ip 0x%x\n", key.remote_ipv4);
return TC_ACT_OK;
}
-SEC("ipip6_set_tunnel")
-int _ipip6_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip6_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
@@ -572,7 +737,7 @@ int _ipip6_set_tunnel(struct __sk_buff *skb)
/* single length check */
if (data + sizeof(*iph) > data_end) {
- ERROR(1);
+ log_err(1);
return TC_ACT_SHOT;
}
@@ -585,34 +750,33 @@ int _ipip6_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ipip6_get_tunnel")
-int _ipip6_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ipip6_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "remote ip6 %x::%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), bpf_htonl(key.remote_ipv6[0]),
- bpf_htonl(key.remote_ipv6[3]));
+ bpf_printk("remote ip6 %x::%x\n", bpf_htonl(key.remote_ipv6[0]),
+ bpf_htonl(key.remote_ipv6[3]));
return TC_ACT_OK;
}
-SEC("ip6ip6_set_tunnel")
-int _ip6ip6_set_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6ip6_set_tunnel(struct __sk_buff *skb)
{
struct bpf_tunnel_key key = {};
void *data = (void *)(long)skb->data;
@@ -622,7 +786,7 @@ int _ip6ip6_set_tunnel(struct __sk_buff *skb)
/* single length check */
if (data + sizeof(*iph) > data_end) {
- ERROR(1);
+ log_err(1);
return TC_ACT_SHOT;
}
@@ -634,45 +798,44 @@ int _ip6ip6_set_tunnel(struct __sk_buff *skb)
ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
return TC_ACT_OK;
}
-SEC("ip6ip6_get_tunnel")
-int _ip6ip6_get_tunnel(struct __sk_buff *skb)
+SEC("tc")
+int ip6ip6_get_tunnel(struct __sk_buff *skb)
{
int ret;
struct bpf_tunnel_key key;
- char fmt[] = "remote ip6 %x::%x\n";
ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
BPF_F_TUNINFO_IPV6);
if (ret < 0) {
- ERROR(ret);
+ log_err(ret);
return TC_ACT_SHOT;
}
- bpf_trace_printk(fmt, sizeof(fmt), bpf_htonl(key.remote_ipv6[0]),
- bpf_htonl(key.remote_ipv6[3]));
+ bpf_printk("remote ip6 %x::%x\n", bpf_htonl(key.remote_ipv6[0]),
+ bpf_htonl(key.remote_ipv6[3]));
return TC_ACT_OK;
}
-SEC("xfrm_get_state")
-int _xfrm_get_state(struct __sk_buff *skb)
+SEC("tc")
+int xfrm_get_state(struct __sk_buff *skb)
{
struct bpf_xfrm_state x;
- char fmt[] = "reqid %d spi 0x%x remote ip 0x%x\n";
int ret;
ret = bpf_skb_get_xfrm_state(skb, 0, &x, sizeof(x), 0);
if (ret < 0)
return TC_ACT_OK;
- bpf_trace_printk(fmt, sizeof(fmt), x.reqid, bpf_ntohl(x.spi),
- bpf_ntohl(x.remote_ipv4));
+ bpf_printk("reqid %d spi 0x%x remote ip 0x%x\n",
+ x.reqid, bpf_ntohl(x.spi),
+ bpf_ntohl(x.remote_ipv4));
return TC_ACT_OK;
}
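[Editor's note] The *_src variants above read their local address from local_ip_map, so the test runner is expected to seed the map before generating traffic. A minimal user-space sketch, with a hypothetical skeleton handle and an illustrative address:

	__u32 key = 0;
	__u32 local_ip = 0xac100164;	/* 172.16.1.100, illustrative */

	bpf_map_update_elem(bpf_map__fd(skel->maps.local_ip_map),
			    &key, &local_ip, BPF_ANY);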
diff --git a/tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c
new file mode 100644
index 000000000000..fc423e43a3cd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+__u32 perfbuf_val = 0;
+__u32 ringbuf_val = 0;
+
+int test_pid;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} percpu_array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u32);
+} percpu_hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+} perfbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RINGBUF);
+ __uint(max_entries, 1 << 12);
+} ringbuf SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} prog_array SEC(".maps");
+
+SEC("fentry/" SYS_PREFIX "sys_nanosleep")
+int sys_nanosleep_enter(void *ctx)
+{
+ int cur_pid;
+
+ cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+ if (cur_pid != test_pid)
+ return 0;
+
+ bpf_perf_event_output(ctx, &perfbuf, BPF_F_CURRENT_CPU, &perfbuf_val, sizeof(perfbuf_val));
+ bpf_ringbuf_output(&ringbuf, &ringbuf_val, sizeof(ringbuf_val), 0);
+
+ return 0;
+}
+
+SEC("perf_event")
+int handle_perf_event(void *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
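[Editor's note] Events emitted through the PERF_EVENT_ARRAY can be drained from user space with libbpf's perf_buffer API. A minimal sketch, assuming a skeleton handle skel and an illustrative callback:

	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
	{
		/* data points at a copy of perfbuf_val */
	}

	struct perf_buffer *pb;

	pb = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf),
			      8 /* pages per CPU */, on_sample,
			      NULL /* lost_cb */, NULL, NULL);
	perf_buffer__poll(pb, 100 /* timeout, ms */);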
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
new file mode 100644
index 000000000000..ab75522e2eeb
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Oracle and/or its affiliates. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int uprobe_byname_parm1 = 0;
+int uprobe_byname_ran = 0;
+int uretprobe_byname_rc = 0;
+int uretprobe_byname_ran = 0;
+size_t uprobe_byname2_parm1 = 0;
+int uprobe_byname2_ran = 0;
+char *uretprobe_byname2_rc = NULL;
+int uretprobe_byname2_ran = 0;
+
+int test_pid;
+
+/* This program cannot auto-attach, but that should not stop other
+ * programs from attaching.
+ */
+SEC("uprobe")
+int handle_uprobe_noautoattach(struct pt_regs *ctx)
+{
+ return 0;
+}
+
+SEC("uprobe//proc/self/exe:autoattach_trigger_func")
+int handle_uprobe_byname(struct pt_regs *ctx)
+{
+ uprobe_byname_parm1 = PT_REGS_PARM1_CORE(ctx);
+ uprobe_byname_ran = 1;
+ return 0;
+}
+
+SEC("uretprobe//proc/self/exe:autoattach_trigger_func")
+int handle_uretprobe_byname(struct pt_regs *ctx)
+{
+ uretprobe_byname_rc = PT_REGS_RC_CORE(ctx);
+ uretprobe_byname_ran = 2;
+ return 0;
+}
+
+
+SEC("uprobe/libc.so.6:malloc")
+int handle_uprobe_byname2(struct pt_regs *ctx)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ /* ignore irrelevant invocations */
+ if (test_pid != pid)
+ return 0;
+ uprobe_byname2_parm1 = PT_REGS_PARM1_CORE(ctx);
+ uprobe_byname2_ran = 3;
+ return 0;
+}
+
+SEC("uretprobe/libc.so.6:malloc")
+int handle_uretprobe_byname2(struct pt_regs *ctx)
+{
+ int pid = bpf_get_current_pid_tgid() >> 32;
+
+ /* ignore irrelevant invocations */
+ if (test_pid != pid)
+ return 0;
+ uretprobe_byname2_rc = (char *)PT_REGS_RC_CORE(ctx);
+ uretprobe_byname2_ran = 4;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
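[Editor's note] The SEC("uprobe/BINARY:FUNCTION") form encodes everything libbpf needs for auto-attach. The manual equivalent, useful when the target is only known at runtime, would look roughly like this with recent libbpf (names illustrative):

	LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "malloc");
	struct bpf_link *link;

	link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2,
					       -1 /* any pid */, "libc.so.6",
					       0 /* offset, resolved via func_name */,
					       &opts);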
diff --git a/tools/testing/selftests/bpf/progs/test_urandom_usdt.c b/tools/testing/selftests/bpf/progs/test_urandom_usdt.c
new file mode 100644
index 000000000000..3539b02bd5f7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_urandom_usdt.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+int urand_pid;
+
+int urand_read_without_sema_call_cnt;
+int urand_read_without_sema_buf_sz_sum;
+
+SEC("usdt/./urandom_read:urand:read_without_sema")
+int BPF_USDT(urand_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urand_read_without_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urand_read_without_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urand_read_with_sema_call_cnt;
+int urand_read_with_sema_buf_sz_sum;
+
+SEC("usdt/./urandom_read:urand:read_with_sema")
+int BPF_USDT(urand_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urand_read_with_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urand_read_with_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urandlib_read_without_sema_call_cnt;
+int urandlib_read_without_sema_buf_sz_sum;
+
+SEC("usdt/./liburandom_read.so:urandlib:read_without_sema")
+int BPF_USDT(urandlib_read_without_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urandlib_read_without_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urandlib_read_without_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+int urandlib_read_with_sema_call_cnt;
+int urandlib_read_with_sema_buf_sz_sum;
+
+SEC("usdt/./liburandom_read.so:urandlib:read_with_sema")
+int BPF_USDT(urandlib_read_with_sema, int iter_num, int iter_cnt, int buf_sz)
+{
+ if (urand_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&urandlib_read_with_sema_call_cnt, 1);
+ __sync_fetch_and_add(&urandlib_read_with_sema_buf_sz_sum, buf_sz);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_usdt.c b/tools/testing/selftests/bpf/progs/test_usdt.c
new file mode 100644
index 000000000000..505aab9a5234
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_usdt.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+int my_pid;
+
+int usdt0_called;
+u64 usdt0_cookie;
+int usdt0_arg_cnt;
+int usdt0_arg_ret;
+
+SEC("usdt")
+int usdt0(struct pt_regs *ctx)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt0_called, 1);
+
+ usdt0_cookie = bpf_usdt_cookie(ctx);
+ usdt0_arg_cnt = bpf_usdt_arg_cnt(ctx);
+ /* should return -ENOENT for any arg_num */
+ usdt0_arg_ret = bpf_usdt_arg(ctx, bpf_get_prandom_u32(), &tmp);
+ return 0;
+}
+
+int usdt3_called;
+u64 usdt3_cookie;
+int usdt3_arg_cnt;
+int usdt3_arg_rets[3];
+u64 usdt3_args[3];
+
+SEC("usdt//proc/self/exe:test:usdt3")
+int usdt3(struct pt_regs *ctx)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt3_called, 1);
+
+ usdt3_cookie = bpf_usdt_cookie(ctx);
+ usdt3_arg_cnt = bpf_usdt_arg_cnt(ctx);
+
+ usdt3_arg_rets[0] = bpf_usdt_arg(ctx, 0, &tmp);
+ usdt3_args[0] = (int)tmp;
+
+ usdt3_arg_rets[1] = bpf_usdt_arg(ctx, 1, &tmp);
+ usdt3_args[1] = (long)tmp;
+
+ usdt3_arg_rets[2] = bpf_usdt_arg(ctx, 2, &tmp);
+ usdt3_args[2] = (uintptr_t)tmp;
+
+ return 0;
+}
+
+int usdt12_called;
+u64 usdt12_cookie;
+int usdt12_arg_cnt;
+u64 usdt12_args[12];
+
+SEC("usdt//proc/self/exe:test:usdt12")
+int BPF_USDT(usdt12, int a1, int a2, long a3, long a4, unsigned a5,
+ long a6, __u64 a7, uintptr_t a8, int a9, short a10,
+ short a11, signed char a12)
+{
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt12_called, 1);
+
+ usdt12_cookie = bpf_usdt_cookie(ctx);
+ usdt12_arg_cnt = bpf_usdt_arg_cnt(ctx);
+
+ usdt12_args[0] = a1;
+ usdt12_args[1] = a2;
+ usdt12_args[2] = a3;
+ usdt12_args[3] = a4;
+ usdt12_args[4] = a5;
+ usdt12_args[5] = a6;
+ usdt12_args[6] = a7;
+ usdt12_args[7] = a8;
+ usdt12_args[8] = a9;
+ usdt12_args[9] = a10;
+ usdt12_args[10] = a11;
+ usdt12_args[11] = a12;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
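[Editor's note] The cookie returned by bpf_usdt_cookie() above is supplied by user space at attach time. A minimal sketch, with illustrative values:

	LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafedeadbeeffeed);
	struct bpf_link *link;

	link = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* self */,
					"/proc/self/exe", "test", "usdt3",
					&opts);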
diff --git a/tools/testing/selftests/bpf/progs/test_usdt_multispec.c b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c
new file mode 100644
index 000000000000..aa6de32b50d1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/usdt.bpf.h>
+
+/* This file is linked together with test_usdt.c to validate that usdt.bpf.h
+ * can be included in multiple .bpf.c files that form a single final BPF
+ * object file.
+ */
+
+extern int my_pid;
+
+int usdt_100_called;
+int usdt_100_sum;
+
+SEC("usdt//proc/self/exe:test:usdt_100")
+int BPF_USDT(usdt_100, int x)
+{
+ long tmp;
+
+ if (my_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ __sync_fetch_and_add(&usdt_100_called, 1);
+ __sync_fetch_and_add(&usdt_100_sum, x);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c
index 913acdffd90f..3987ff174f1f 100644
--- a/tools/testing/selftests/bpf/progs/test_varlen.c
+++ b/tools/testing/selftests/bpf/progs/test_varlen.c
@@ -41,20 +41,20 @@ int handler64_unsigned(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload1;
- u64 len;
+ long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
return 0;
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
- if (len <= MAX_LEN) {
+ if (len >= 0) {
payload += len;
payload1_len1 = len;
}
len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
- if (len <= MAX_LEN) {
+ if (len >= 0) {
payload += len;
payload1_len2 = len;
}
@@ -123,7 +123,7 @@ int handler32_signed(void *regs)
{
int pid = bpf_get_current_pid_tgid() >> 32;
void *payload = payload4;
- int len;
+ long len;
/* ignore irrelevant invocations */
if (test_pid != pid || !capture)
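[Editor's note] The switch from u64 to long matters because bpf_probe_read_kernel_str() returns either a negative errno or the number of bytes read including the terminating NUL; stored into an unsigned variable, -EFAULT would masquerade as an enormous length. A minimal sketch of the corrected pattern:

	long len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);

	if (len >= 0)	/* only advance on success */
		payload += len;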
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 596c4e71bf3a..ba48fcb98ab2 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -239,7 +239,7 @@ bool parse_udp(void *data, void *data_end,
udp = data + off;
if (udp + 1 > data_end)
- return 0;
+ return false;
if (!is_icmp) {
pckt->flow.port16[0] = udp->source;
pckt->flow.port16[1] = udp->dest;
@@ -247,7 +247,7 @@ bool parse_udp(void *data, void *data_end,
pckt->flow.port16[0] = udp->dest;
pckt->flow.port16[1] = udp->source;
}
- return 1;
+ return true;
}
static __attribute__ ((noinline))
@@ -261,7 +261,7 @@ bool parse_tcp(void *data, void *data_end,
tcp = data + off;
if (tcp + 1 > data_end)
- return 0;
+ return false;
if (tcp->syn)
pckt->flags |= (1 << 1);
if (!is_icmp) {
@@ -271,7 +271,7 @@ bool parse_tcp(void *data, void *data_end,
pckt->flow.port16[0] = tcp->dest;
pckt->flow.port16[1] = tcp->source;
}
- return 1;
+ return true;
}
static __attribute__ ((noinline))
@@ -287,7 +287,7 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
void *data;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
- return 0;
+ return false;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
@@ -295,7 +295,7 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
old_eth = data + sizeof(struct ipv6hdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || ip6h + 1 > data_end)
- return 0;
+ return false;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 56710;
@@ -314,7 +314,7 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
ip6h->saddr.in6_u.u6_addr32[2] = 3;
ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
- return 1;
+ return true;
}
static __attribute__ ((noinline))
@@ -335,7 +335,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
ip_suffix <<= 15;
ip_suffix ^= pckt->flow.src;
if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
- return 0;
+ return false;
data = (void *)(long)xdp->data;
data_end = (void *)(long)xdp->data_end;
new_eth = data;
@@ -343,7 +343,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
old_eth = data + sizeof(struct iphdr);
if (new_eth + 1 > data_end ||
old_eth + 1 > data_end || iph + 1 > data_end)
- return 0;
+ return false;
memcpy(new_eth->eth_dest, cval->mac, 6);
memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
new_eth->eth_proto = 8;
@@ -367,8 +367,8 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
csum += *next_iph_u16++;
iph->check = ~((csum & 0xffff) + (csum >> 16));
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
- return 0;
- return 1;
+ return false;
+ return true;
}
static __attribute__ ((noinline))
@@ -386,10 +386,10 @@ bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
else
new_eth->eth_proto = 56710;
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
- return 0;
+ return false;
*data = (void *)(long)xdp->data;
*data_end = (void *)(long)xdp->data_end;
- return 1;
+ return true;
}
static __attribute__ ((noinline))
@@ -404,10 +404,10 @@ bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
new_eth->eth_proto = 8;
if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
- return 0;
+ return false;
*data = (void *)(long)xdp->data;
*data_end = (void *)(long)xdp->data_end;
- return 1;
+ return true;
}
static __attribute__ ((noinline))
@@ -564,22 +564,22 @@ static bool get_packet_dst(struct real_definition **real,
hash = get_packet_hash(pckt, hash_16bytes);
if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
- return 0;
+ return false;
key = 2 * vip_info->vip_num + hash % 2;
real_pos = bpf_map_lookup_elem(&ch_rings, &key);
if (!real_pos)
- return 0;
+ return false;
key = *real_pos;
*real = bpf_map_lookup_elem(&reals, &key);
if (!(*real))
- return 0;
+ return false;
if (!(vip_info->flags & (1 << 1))) {
__u32 conn_rate_key = 512 + 2;
struct lb_stats *conn_rate_stats =
bpf_map_lookup_elem(&stats, &conn_rate_key);
if (!conn_rate_stats)
- return 1;
+ return true;
cur_time = bpf_ktime_get_ns();
if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
conn_rate_stats->v1 = 1;
@@ -587,14 +587,14 @@ static bool get_packet_dst(struct real_definition **real,
} else {
conn_rate_stats->v1 += 1;
if (conn_rate_stats->v1 >= 1)
- return 1;
+ return true;
}
if (pckt->flow.proto == IPPROTO_UDP)
new_dst_lru.atime = cur_time;
new_dst_lru.pos = key;
bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
}
- return 1;
+ return true;
}
__attribute__ ((noinline))
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 2ab049b54d6c..694e7cec1823 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -54,7 +54,7 @@ int bench_trigger_fmodret(void *ctx)
return -22;
}
-SEC("uprobe/self/uprobe_target")
+SEC("uprobe")
int bench_trigger_uprobe(void *ctx)
{
__sync_add_and_fetch(&hits, 1);
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
new file mode 100644
index 000000000000..736686e903f6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <asm/errno.h>
+
+#define TC_ACT_OK 0
+#define TC_ACT_SHOT 2
+
+#define NSEC_PER_SEC 1000000000L
+
+#define ETH_ALEN 6
+#define ETH_P_IP 0x0800
+#define ETH_P_IPV6 0x86DD
+
+#define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3])
+
+#define IP_DF 0x4000
+#define IP_MF 0x2000
+#define IP_OFFSET 0x1fff
+
+#define NEXTHDR_TCP 6
+
+#define TCPOPT_NOP 1
+#define TCPOPT_EOL 0
+#define TCPOPT_MSS 2
+#define TCPOPT_WINDOW 3
+#define TCPOPT_SACK_PERM 4
+#define TCPOPT_TIMESTAMP 8
+
+#define TCPOLEN_MSS 4
+#define TCPOLEN_WINDOW 3
+#define TCPOLEN_SACK_PERM 2
+#define TCPOLEN_TIMESTAMP 10
+
+#define TCP_TS_HZ 1000
+#define TS_OPT_WSCALE_MASK 0xf
+#define TS_OPT_SACK (1 << 4)
+#define TS_OPT_ECN (1 << 5)
+#define TSBITS 6
+#define TSMASK (((__u32)1 << TSBITS) - 1)
+#define TCP_MAX_WSCALE 14U
+
+#define IPV4_MAXLEN 60
+#define TCP_MAXLEN 60
+
+#define DEFAULT_MSS4 1460
+#define DEFAULT_MSS6 1440
+#define DEFAULT_WSCALE 7
+#define DEFAULT_TTL 64
+#define MAX_ALLOWED_PORTS 8
+
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __attribute__((__packed__)) *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u64);
+ __uint(max_entries, 2);
+} values SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u16);
+ __uint(max_entries, MAX_ALLOWED_PORTS);
+} allowed_ports SEC(".maps");
+
+/* Some symbols defined in net/netfilter/nf_conntrack_bpf.c are unavailable in
+ * vmlinux.h if CONFIG_NF_CONNTRACK=m, so they are redefined locally.
+ */
+
+struct bpf_ct_opts___local {
+ s32 netns_id;
+ s32 error;
+ u8 l4proto;
+ u8 dir;
+ u8 reserved[2];
+} __attribute__((preserve_access_index));
+
+#define BPF_F_CURRENT_NETNS (-1)
+
+extern struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
+ struct bpf_sock_tuple *bpf_tuple,
+ __u32 len_tuple,
+ struct bpf_ct_opts___local *opts,
+ __u32 len_opts) __ksym;
+
+extern struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *skb_ctx,
+ struct bpf_sock_tuple *bpf_tuple,
+ u32 len_tuple,
+ struct bpf_ct_opts___local *opts,
+ u32 len_opts) __ksym;
+
+extern void bpf_ct_release(struct nf_conn *ct) __ksym;
+
+static __always_inline void swap_eth_addr(__u8 *a, __u8 *b)
+{
+ __u8 tmp[ETH_ALEN];
+
+ __builtin_memcpy(tmp, a, ETH_ALEN);
+ __builtin_memcpy(a, b, ETH_ALEN);
+ __builtin_memcpy(b, tmp, ETH_ALEN);
+}
+
+static __always_inline __u16 csum_fold(__u32 csum)
+{
+ csum = (csum & 0xffff) + (csum >> 16);
+ csum = (csum & 0xffff) + (csum >> 16);
+ return (__u16)~csum;
+}
+
+static __always_inline __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+ __u32 len, __u8 proto,
+ __u32 csum)
+{
+ __u64 s = csum;
+
+ s += (__u32)saddr;
+ s += (__u32)daddr;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ s += proto + len;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ s += (proto + len) << 8;
+#else
+#error Unknown endian
+#endif
+ s = (s & 0xffffffff) + (s >> 32);
+ s = (s & 0xffffffff) + (s >> 32);
+
+ return csum_fold((__u32)s);
+}
+
+static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, __u8 proto, __u32 csum)
+{
+ __u64 sum = csum;
+ int i;
+
+#pragma unroll
+ for (i = 0; i < 4; i++)
+ sum += (__u32)saddr->in6_u.u6_addr32[i];
+
+#pragma unroll
+ for (i = 0; i < 4; i++)
+ sum += (__u32)daddr->in6_u.u6_addr32[i];
+
+ /* Don't combine additions to avoid 32-bit overflow. */
+ sum += bpf_htonl(len);
+ sum += bpf_htonl(proto);
+
+ sum = (sum & 0xffffffff) + (sum >> 32);
+ sum = (sum & 0xffffffff) + (sum >> 32);
+
+ return csum_fold((__u32)sum);
+}
+
+static __always_inline __u64 tcp_clock_ns(void)
+{
+ return bpf_ktime_get_ns();
+}
+
+static __always_inline __u32 tcp_ns_to_ts(__u64 ns)
+{
+ return ns / (NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+static __always_inline __u32 tcp_time_stamp_raw(void)
+{
+ return tcp_ns_to_ts(tcp_clock_ns());
+}
+
+struct tcpopt_context {
+ __u8 *ptr;
+ __u8 *end;
+ void *data_end;
+ __be32 *tsecr;
+ __u8 wscale;
+ bool option_timestamp;
+ bool option_sack;
+};
+
+static int tscookie_tcpopt_parse(struct tcpopt_context *ctx)
+{
+ __u8 opcode, opsize;
+
+ if (ctx->ptr >= ctx->end)
+ return 1;
+ if (ctx->ptr >= ctx->data_end)
+ return 1;
+
+ opcode = ctx->ptr[0];
+
+ if (opcode == TCPOPT_EOL)
+ return 1;
+ if (opcode == TCPOPT_NOP) {
+ ++ctx->ptr;
+ return 0;
+ }
+
+ if (ctx->ptr + 1 >= ctx->end)
+ return 1;
+ if (ctx->ptr + 1 >= ctx->data_end)
+ return 1;
+ opsize = ctx->ptr[1];
+ if (opsize < 2)
+ return 1;
+
+ if (ctx->ptr + opsize > ctx->end)
+ return 1;
+
+ switch (opcode) {
+ case TCPOPT_WINDOW:
+ if (opsize == TCPOLEN_WINDOW && ctx->ptr + TCPOLEN_WINDOW <= ctx->data_end)
+ ctx->wscale = ctx->ptr[2] < TCP_MAX_WSCALE ? ctx->ptr[2] : TCP_MAX_WSCALE;
+ break;
+ case TCPOPT_TIMESTAMP:
+ if (opsize == TCPOLEN_TIMESTAMP && ctx->ptr + TCPOLEN_TIMESTAMP <= ctx->data_end) {
+ ctx->option_timestamp = true;
+ /* Client's tsval becomes our tsecr. */
+ *ctx->tsecr = get_unaligned((__be32 *)(ctx->ptr + 2));
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (opsize == TCPOLEN_SACK_PERM)
+ ctx->option_sack = true;
+ break;
+ }
+
+ ctx->ptr += opsize;
+
+ return 0;
+}
+
+static int tscookie_tcpopt_parse_batch(__u32 index, void *context)
+{
+ int i;
+
+ for (i = 0; i < 7; i++)
+ if (tscookie_tcpopt_parse(context))
+ return 1;
+ return 0;
+}
+
+static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
+ __u16 tcp_len, __be32 *tsval,
+ __be32 *tsecr, void *data_end)
+{
+ struct tcpopt_context loop_ctx = {
+ .ptr = (__u8 *)(tcp_header + 1),
+ .end = (__u8 *)tcp_header + tcp_len,
+ .data_end = data_end,
+ .tsecr = tsecr,
+ .wscale = TS_OPT_WSCALE_MASK,
+ .option_timestamp = false,
+ .option_sack = false,
+ };
+ u32 cookie;
+
+ bpf_loop(6, tscookie_tcpopt_parse_batch, &loop_ctx, 0);
+
+ if (!loop_ctx.option_timestamp)
+ return false;
+
+ cookie = tcp_time_stamp_raw() & ~TSMASK;
+ cookie |= loop_ctx.wscale & TS_OPT_WSCALE_MASK;
+ if (loop_ctx.option_sack)
+ cookie |= TS_OPT_SACK;
+ if (tcp_header->ece && tcp_header->cwr)
+ cookie |= TS_OPT_ECN;
+ *tsval = bpf_htonl(cookie);
+
+ return true;
+}
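[Editor's note] The cookie built above packs the negotiated options into the low TSBITS bits of the timestamp value. A minimal decode sketch, not part of this program, showing how those bits are recovered from an echoed tsecr once converted to host order:

	static __always_inline void tscookie_decode(__u32 tsval_host, __u8 *wscale,
						    bool *sack_ok, bool *ecn_ok)
	{
		*wscale = tsval_host & TS_OPT_WSCALE_MASK;
		*sack_ok = tsval_host & TS_OPT_SACK;
		*ecn_ok = tsval_host & TS_OPT_ECN;
	}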
+
+static __always_inline void values_get_tcpipopts(__u16 *mss, __u8 *wscale,
+ __u8 *ttl, bool ipv6)
+{
+ __u32 key = 0;
+ __u64 *value;
+
+ value = bpf_map_lookup_elem(&values, &key);
+ if (value && *value != 0) {
+ if (ipv6)
+ *mss = (*value >> 32) & 0xffff;
+ else
+ *mss = *value & 0xffff;
+ *wscale = (*value >> 16) & 0xf;
+ *ttl = (*value >> 24) & 0xff;
+ return;
+ }
+
+ *mss = ipv6 ? DEFAULT_MSS6 : DEFAULT_MSS4;
+ *wscale = DEFAULT_WSCALE;
+ *ttl = DEFAULT_TTL;
+}
+
+static __always_inline void values_inc_synacks(void)
+{
+ __u32 key = 1;
+ __u32 *value;
+
+ value = bpf_map_lookup_elem(&values, &key);
+ if (value)
+ __sync_fetch_and_add(value, 1);
+}
+
+static __always_inline bool check_port_allowed(__u16 port)
+{
+ __u32 i;
+
+ for (i = 0; i < MAX_ALLOWED_PORTS; i++) {
+ __u32 key = i;
+ __u16 *value;
+
+ value = bpf_map_lookup_elem(&allowed_ports, &key);
+
+ if (!value)
+ break;
+ /* 0 is a terminator value. Check it first to avoid matching on
+ * a forbidden port == 0 and returning true.
+ */
+ if (*value == 0)
+ break;
+
+ if (*value == port)
+ return true;
+ }
+
+ return false;
+}
+
+struct header_pointers {
+ struct ethhdr *eth;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct tcphdr *tcp;
+ __u16 tcp_len;
+};
+
+static __always_inline int tcp_dissect(void *data, void *data_end,
+ struct header_pointers *hdr)
+{
+ hdr->eth = data;
+ if (hdr->eth + 1 > data_end)
+ return XDP_DROP;
+
+ switch (bpf_ntohs(hdr->eth->h_proto)) {
+ case ETH_P_IP:
+ hdr->ipv6 = NULL;
+
+ hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
+ if (hdr->ipv4 + 1 > data_end)
+ return XDP_DROP;
+ if (hdr->ipv4->ihl * 4 < sizeof(*hdr->ipv4))
+ return XDP_DROP;
+ if (hdr->ipv4->version != 4)
+ return XDP_DROP;
+
+ if (hdr->ipv4->protocol != IPPROTO_TCP)
+ return XDP_PASS;
+
+ hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
+ break;
+ case ETH_P_IPV6:
+ hdr->ipv4 = NULL;
+
+ hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
+ if (hdr->ipv6 + 1 > data_end)
+ return XDP_DROP;
+ if (hdr->ipv6->version != 6)
+ return XDP_DROP;
+
+ /* XXX: Extension headers are not supported and could circumvent
+ * XDP SYN flood protection.
+ */
+ if (hdr->ipv6->nexthdr != NEXTHDR_TCP)
+ return XDP_PASS;
+
+ hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
+ break;
+ default:
+ /* XXX: VLANs will circumvent XDP SYN flood protection. */
+ return XDP_PASS;
+ }
+
+ if (hdr->tcp + 1 > data_end)
+ return XDP_DROP;
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ if (hdr->tcp_len < sizeof(*hdr->tcp))
+ return XDP_DROP;
+
+ return XDP_TX;
+}
+
+static __always_inline int tcp_lookup(void *ctx, struct header_pointers *hdr, bool xdp)
+{
+ struct bpf_ct_opts___local ct_lookup_opts = {
+ .netns_id = BPF_F_CURRENT_NETNS,
+ .l4proto = IPPROTO_TCP,
+ };
+ struct bpf_sock_tuple tup = {};
+ struct nf_conn *ct;
+ __u32 tup_size;
+
+ if (hdr->ipv4) {
+ /* TCP doesn't normally use fragments, and XDP can't reassemble
+ * them.
+ */
+ if ((hdr->ipv4->frag_off & bpf_htons(IP_DF | IP_MF | IP_OFFSET)) != bpf_htons(IP_DF))
+ return XDP_DROP;
+
+ tup.ipv4.saddr = hdr->ipv4->saddr;
+ tup.ipv4.daddr = hdr->ipv4->daddr;
+ tup.ipv4.sport = hdr->tcp->source;
+ tup.ipv4.dport = hdr->tcp->dest;
+ tup_size = sizeof(tup.ipv4);
+ } else if (hdr->ipv6) {
+ __builtin_memcpy(tup.ipv6.saddr, &hdr->ipv6->saddr, sizeof(tup.ipv6.saddr));
+ __builtin_memcpy(tup.ipv6.daddr, &hdr->ipv6->daddr, sizeof(tup.ipv6.daddr));
+ tup.ipv6.sport = hdr->tcp->source;
+ tup.ipv6.dport = hdr->tcp->dest;
+ tup_size = sizeof(tup.ipv6);
+ } else {
+ /* The verifier can't track that either ipv4 or ipv6 is not
+ * NULL.
+ */
+ return XDP_ABORTED;
+ }
+ if (xdp)
+ ct = bpf_xdp_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
+ else
+ ct = bpf_skb_ct_lookup(ctx, &tup, tup_size, &ct_lookup_opts, sizeof(ct_lookup_opts));
+ if (ct) {
+ unsigned long status = ct->status;
+
+ bpf_ct_release(ct);
+ if (status & IPS_CONFIRMED)
+ return XDP_PASS;
+ } else if (ct_lookup_opts.error != -ENOENT) {
+ return XDP_ABORTED;
+ }
+
+ /* error == -ENOENT || !(status & IPS_CONFIRMED) */
+ return XDP_TX;
+}
+
+static __always_inline __u8 tcp_mkoptions(__be32 *buf, __be32 *tsopt, __u16 mss,
+ __u8 wscale)
+{
+ __be32 *start = buf;
+
+ *buf++ = bpf_htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
+
+ if (!tsopt)
+ return buf - start;
+
+ if (tsopt[0] & bpf_htonl(1 << 4))
+ *buf++ = bpf_htonl((TCPOPT_SACK_PERM << 24) |
+ (TCPOLEN_SACK_PERM << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ else
+ *buf++ = bpf_htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ *buf++ = tsopt[0];
+ *buf++ = tsopt[1];
+
+ if ((tsopt[0] & bpf_htonl(0xf)) != bpf_htonl(0xf))
+ *buf++ = bpf_htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_WINDOW << 16) |
+ (TCPOLEN_WINDOW << 8) |
+ wscale);
+
+ return buf - start;
+}
+
+static __always_inline void tcp_gen_synack(struct tcphdr *tcp_header,
+ __u32 cookie, __be32 *tsopt,
+ __u16 mss, __u8 wscale)
+{
+ void *tcp_options;
+
+ tcp_flag_word(tcp_header) = TCP_FLAG_SYN | TCP_FLAG_ACK;
+ if (tsopt && (tsopt[0] & bpf_htonl(1 << 5)))
+ tcp_flag_word(tcp_header) |= TCP_FLAG_ECE;
+ tcp_header->doff = 5; /* doff is part of tcp_flag_word. */
+ swap(tcp_header->source, tcp_header->dest);
+ tcp_header->ack_seq = bpf_htonl(bpf_ntohl(tcp_header->seq) + 1);
+ tcp_header->seq = bpf_htonl(cookie);
+ tcp_header->window = 0;
+ tcp_header->urg_ptr = 0;
+ tcp_header->check = 0; /* Calculate checksum later. */
+
+ tcp_options = (void *)(tcp_header + 1);
+ tcp_header->doff += tcp_mkoptions(tcp_options, tsopt, mss, wscale);
+}
+
+static __always_inline void tcpv4_gen_synack(struct header_pointers *hdr,
+ __u32 cookie, __be32 *tsopt)
+{
+ __u8 wscale;
+ __u16 mss;
+ __u8 ttl;
+
+ values_get_tcpipopts(&mss, &wscale, &ttl, false);
+
+ swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);
+
+ swap(hdr->ipv4->saddr, hdr->ipv4->daddr);
+ hdr->ipv4->check = 0; /* Calculate checksum later. */
+ hdr->ipv4->tos = 0;
+ hdr->ipv4->id = 0;
+ hdr->ipv4->ttl = ttl;
+
+ tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);
+
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ hdr->ipv4->tot_len = bpf_htons(sizeof(*hdr->ipv4) + hdr->tcp_len);
+}
+
+static __always_inline void tcpv6_gen_synack(struct header_pointers *hdr,
+ __u32 cookie, __be32 *tsopt)
+{
+ __u8 wscale;
+ __u16 mss;
+ __u8 ttl;
+
+ values_get_tcpipopts(&mss, &wscale, &ttl, true);
+
+ swap_eth_addr(hdr->eth->h_source, hdr->eth->h_dest);
+
+ swap(hdr->ipv6->saddr, hdr->ipv6->daddr);
+ *(__be32 *)hdr->ipv6 = bpf_htonl(0x60000000);
+ hdr->ipv6->hop_limit = ttl;
+
+ tcp_gen_synack(hdr->tcp, cookie, tsopt, mss, wscale);
+
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ hdr->ipv6->payload_len = bpf_htons(hdr->tcp_len);
+}
+
+static __always_inline int syncookie_handle_syn(struct header_pointers *hdr,
+ void *ctx,
+ void *data, void *data_end,
+ bool xdp)
+{
+ __u32 old_pkt_size, new_pkt_size;
+ /* Unlike clang 10, clang 11 and 12 generate code that doesn't pass the
+ * BPF verifier if tsopt is not volatile. Volatile forces it to store
+	 * the pointer value and use it directly; otherwise, tcp_mkoptions is
+ * (mis)compiled like this:
+ * if (!tsopt)
+ * return buf - start;
+ * reg = stored_return_value_of_tscookie_init;
+ * if (reg)
+ * tsopt = tsopt_buf;
+ * else
+ * tsopt = NULL;
+ * ...
+ * *buf++ = tsopt[1];
+ * It creates a dead branch where tsopt is assigned NULL, but the
+ * verifier can't prove it's dead and blocks the program.
+ */
+ __be32 * volatile tsopt = NULL;
+ __be32 tsopt_buf[2] = {};
+ __u16 ip_len;
+ __u32 cookie;
+ __s64 value;
+
+	/* The checksum is not verified yet, but both a checksum failure and
+	 * the TCP header checks below return XDP_DROP, so their order doesn't
+	 * matter.
+	 */
+ if (hdr->tcp->fin || hdr->tcp->rst)
+ return XDP_DROP;
+
+ /* Issue SYN cookies on allowed ports, drop SYN packets on blocked
+ * ports.
+ */
+ if (!check_port_allowed(bpf_ntohs(hdr->tcp->dest)))
+ return XDP_DROP;
+
+ if (hdr->ipv4) {
+ /* Check the IPv4 and TCP checksums before creating a SYNACK. */
+ value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, hdr->ipv4->ihl * 4, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (csum_fold(value) != 0)
+ return XDP_DROP; /* Bad IPv4 checksum. */
+
+ value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (csum_tcpudp_magic(hdr->ipv4->saddr, hdr->ipv4->daddr,
+ hdr->tcp_len, IPPROTO_TCP, value) != 0)
+ return XDP_DROP; /* Bad TCP checksum. */
+
+ ip_len = sizeof(*hdr->ipv4);
+
+ value = bpf_tcp_raw_gen_syncookie_ipv4(hdr->ipv4, hdr->tcp,
+ hdr->tcp_len);
+ } else if (hdr->ipv6) {
+ /* Check the TCP checksum before creating a SYNACK. */
+ value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (csum_ipv6_magic(&hdr->ipv6->saddr, &hdr->ipv6->daddr,
+ hdr->tcp_len, IPPROTO_TCP, value) != 0)
+ return XDP_DROP; /* Bad TCP checksum. */
+
+ ip_len = sizeof(*hdr->ipv6);
+
+ value = bpf_tcp_raw_gen_syncookie_ipv6(hdr->ipv6, hdr->tcp,
+ hdr->tcp_len);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ if (value < 0)
+ return XDP_ABORTED;
+ cookie = (__u32)value;
+
+ if (tscookie_init((void *)hdr->tcp, hdr->tcp_len,
+ &tsopt_buf[0], &tsopt_buf[1], data_end))
+ tsopt = tsopt_buf;
+
+	/* Check that there is enough space for a SYNACK. This also ensures
+	 * that the destination of the __builtin_memmove below doesn't
+	 * overflow.
+	 */
+ if (data + sizeof(*hdr->eth) + ip_len + TCP_MAXLEN > data_end)
+ return XDP_ABORTED;
+
+ if (hdr->ipv4) {
+ if (hdr->ipv4->ihl * 4 > sizeof(*hdr->ipv4)) {
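+			/* The SYN carried IPv4 options (ihl > 5); do not echo
+			 * them in the SYNACK: move the TCP header up so it
+			 * directly follows the fixed 20-byte IPv4 header.
+			 */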
+ struct tcphdr *new_tcp_header;
+
+ new_tcp_header = data + sizeof(*hdr->eth) + sizeof(*hdr->ipv4);
+ __builtin_memmove(new_tcp_header, hdr->tcp, sizeof(*hdr->tcp));
+ hdr->tcp = new_tcp_header;
+
+ hdr->ipv4->ihl = sizeof(*hdr->ipv4) / 4;
+ }
+
+ tcpv4_gen_synack(hdr, cookie, tsopt);
+ } else if (hdr->ipv6) {
+ tcpv6_gen_synack(hdr, cookie, tsopt);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ /* Recalculate checksums. */
+ hdr->tcp->check = 0;
+ value = bpf_csum_diff(0, 0, (void *)hdr->tcp, hdr->tcp_len, 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ if (hdr->ipv4) {
+ hdr->tcp->check = csum_tcpudp_magic(hdr->ipv4->saddr,
+ hdr->ipv4->daddr,
+ hdr->tcp_len,
+ IPPROTO_TCP,
+ value);
+
+ hdr->ipv4->check = 0;
+ value = bpf_csum_diff(0, 0, (void *)hdr->ipv4, sizeof(*hdr->ipv4), 0);
+ if (value < 0)
+ return XDP_ABORTED;
+ hdr->ipv4->check = csum_fold(value);
+ } else if (hdr->ipv6) {
+ hdr->tcp->check = csum_ipv6_magic(&hdr->ipv6->saddr,
+ &hdr->ipv6->daddr,
+ hdr->tcp_len,
+ IPPROTO_TCP,
+ value);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ /* Set the new packet size. */
+ old_pkt_size = data_end - data;
+ new_pkt_size = sizeof(*hdr->eth) + ip_len + hdr->tcp->doff * 4;
+ if (xdp) {
+ if (bpf_xdp_adjust_tail(ctx, new_pkt_size - old_pkt_size))
+ return XDP_ABORTED;
+ } else {
+ if (bpf_skb_change_tail(ctx, new_pkt_size, 0))
+ return XDP_ABORTED;
+ }
+
+ values_inc_synacks();
+
+ return XDP_TX;
+}
+
+static __always_inline int syncookie_handle_ack(struct header_pointers *hdr)
+{
+ int err;
+
+ if (hdr->tcp->rst)
+ return XDP_DROP;
+
+ if (hdr->ipv4)
+ err = bpf_tcp_raw_check_syncookie_ipv4(hdr->ipv4, hdr->tcp);
+ else if (hdr->ipv6)
+ err = bpf_tcp_raw_check_syncookie_ipv6(hdr->ipv6, hdr->tcp);
+ else
+ return XDP_ABORTED;
+ if (err)
+ return XDP_DROP;
+
+ return XDP_PASS;
+}
+
+static __always_inline int syncookie_part1(void *ctx, void *data, void *data_end,
+ struct header_pointers *hdr, bool xdp)
+{
+ int ret;
+
+ ret = tcp_dissect(data, data_end, hdr);
+ if (ret != XDP_TX)
+ return ret;
+
+ ret = tcp_lookup(ctx, hdr, xdp);
+ if (ret != XDP_TX)
+ return ret;
+
+ /* Packet is TCP and doesn't belong to an established connection. */
+
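+	/* Exactly one of SYN and ACK must be set: a SYN starts the cookie
+	 * handshake, and a bare ACK completes it; anything else is dropped.
+	 */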
+ if ((hdr->tcp->syn ^ hdr->tcp->ack) != 1)
+ return XDP_DROP;
+
+ /* Grow the TCP header to TCP_MAXLEN to be able to pass any hdr->tcp_len
+	 * to bpf_tcp_raw_gen_syncookie_ipv{4,6} and satisfy the verifier.
+ */
+ if (xdp) {
+ if (bpf_xdp_adjust_tail(ctx, TCP_MAXLEN - hdr->tcp_len))
+ return XDP_ABORTED;
+ } else {
+ /* Without volatile the verifier throws this error:
+ * R9 32-bit pointer arithmetic prohibited
+ */
+ volatile u64 old_len = data_end - data;
+
+ if (bpf_skb_change_tail(ctx, old_len + TCP_MAXLEN - hdr->tcp_len, 0))
+ return XDP_ABORTED;
+ }
+
+ return XDP_TX;
+}
+
+static __always_inline int syncookie_part2(void *ctx, void *data, void *data_end,
+ struct header_pointers *hdr, bool xdp)
+{
+ if (hdr->ipv4) {
+ hdr->eth = data;
+ hdr->ipv4 = (void *)hdr->eth + sizeof(*hdr->eth);
+ /* IPV4_MAXLEN is needed when calculating checksum.
+ * At least sizeof(struct iphdr) is needed here to access ihl.
+ */
+ if ((void *)hdr->ipv4 + IPV4_MAXLEN > data_end)
+ return XDP_ABORTED;
+ hdr->tcp = (void *)hdr->ipv4 + hdr->ipv4->ihl * 4;
+ } else if (hdr->ipv6) {
+ hdr->eth = data;
+ hdr->ipv6 = (void *)hdr->eth + sizeof(*hdr->eth);
+ hdr->tcp = (void *)hdr->ipv6 + sizeof(*hdr->ipv6);
+ } else {
+ return XDP_ABORTED;
+ }
+
+ if ((void *)hdr->tcp + TCP_MAXLEN > data_end)
+ return XDP_ABORTED;
+
+	/* We run out of registers, so tcp_len gets spilled to the stack, and
+	 * the verifier forgets the min and max values checked earlier in
+	 * tcp_dissect.
+	 */
+ hdr->tcp_len = hdr->tcp->doff * 4;
+ if (hdr->tcp_len < sizeof(*hdr->tcp))
+ return XDP_ABORTED;
+
+ return hdr->tcp->syn ? syncookie_handle_syn(hdr, ctx, data, data_end, xdp) :
+ syncookie_handle_ack(hdr);
+}
+
+SEC("xdp")
+int syncookie_xdp(struct xdp_md *ctx)
+{
+ void *data_end = (void *)(long)ctx->data_end;
+ void *data = (void *)(long)ctx->data;
+ struct header_pointers hdr;
+ int ret;
+
+ ret = syncookie_part1(ctx, data, data_end, &hdr, true);
+ if (ret != XDP_TX)
+ return ret;
+
+ data_end = (void *)(long)ctx->data_end;
+ data = (void *)(long)ctx->data;
+
+ return syncookie_part2(ctx, data, data_end, &hdr, true);
+}
+
+SEC("tc")
+int syncookie_tc(struct __sk_buff *skb)
+{
+ void *data_end = (void *)(long)skb->data_end;
+ void *data = (void *)(long)skb->data;
+ struct header_pointers hdr;
+ int ret;
+
+ ret = syncookie_part1(skb, data, data_end, &hdr, false);
+ if (ret != XDP_TX)
+ return ret == XDP_PASS ? TC_ACT_OK : TC_ACT_SHOT;
+
+ data_end = (void *)(long)skb->data_end;
+ data = (void *)(long)skb->data;
+
+ ret = syncookie_part2(skb, data, data_end, &hdr, false);
+ switch (ret) {
+ case XDP_PASS:
+ return TC_ACT_OK;
+ case XDP_TX:
+ return bpf_redirect(skb->ifindex, 0);
+ default:
+ return TC_ACT_SHOT;
+ }
+}
+
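+/* A plausible way to attach these two programs by hand with iproute2/tc
+ * (the device and object names below are placeholders; the selftests
+ * normally attach them through their own userspace loader):
+ *
+ *   ip link set dev eth0 xdp obj prog.o sec xdp
+ *   tc qdisc add dev eth0 clsact
+ *   tc filter add dev eth0 ingress bpf direct-action obj prog.o sec tc
+ */
+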
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/sdt-config.h b/tools/testing/selftests/bpf/sdt-config.h
new file mode 100644
index 000000000000..733045a52771
--- /dev/null
+++ b/tools/testing/selftests/bpf/sdt-config.h
@@ -0,0 +1,6 @@
+/* includes/sys/sdt-config.h. Generated from sdt-config.h.in by configure.
+
+ This file just defines _SDT_ASM_SECTION_AUTOGROUP_SUPPORT to 0 or 1 to
+ indicate whether the assembler supports "?" in .pushsection directives. */
+
+#define _SDT_ASM_SECTION_AUTOGROUP_SUPPORT 1
diff --git a/tools/testing/selftests/bpf/sdt.h b/tools/testing/selftests/bpf/sdt.h
new file mode 100644
index 000000000000..ca0162b4dc57
--- /dev/null
+++ b/tools/testing/selftests/bpf/sdt.h
@@ -0,0 +1,513 @@
+/* <sys/sdt.h> - Systemtap static probe definition macros.
+
+ This file is dedicated to the public domain, pursuant to CC0
+ (https://creativecommons.org/publicdomain/zero/1.0/)
+*/
+
+#ifndef _SYS_SDT_H
+#define _SYS_SDT_H 1
+
+/*
+ This file defines a family of macros
+
+ STAP_PROBEn(op1, ..., opn)
+
+ that emit a nop into the instruction stream, and some data into an auxiliary
+ note section. The data in the note section describes the operands, in terms
+   of size and location. Each location is encoded as an assembler operand
+   string.
+ Consumer tools such as gdb or systemtap insert breakpoints on top of
+ the nop, and decode the location operand-strings, like an assembler,
+ to find the values being passed.
+
+ The operand strings are selected by the compiler for each operand.
+   They are constrained by gcc inline-assembler constraint codes. The
+   default is:
+
+ #define STAP_SDT_ARG_CONSTRAINT nor
+
+   This is a good default if the operands tend to be integral and
+   moderate in number (fewer than the number of registers). In other
+   cases, the compiler may report "'asm' requires impossible reload" or
+   similar. In that case, consider simplifying the macro call (fewer
+   and simpler operands), reducing optimization, or overriding the
+   default constraint string via:
+
+ #define STAP_SDT_ARG_CONSTRAINT g
+ #include <sys/sdt.h>
+
+ See also:
+ https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ https://gcc.gnu.org/onlinedocs/gcc/Constraints.html
+ */
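+
+/* Illustrative use from C (the provider and probe names below are made up):
+
+       #include <sys/sdt.h>
+
+       void handle_request(int fd, size_t len)
+       {
+         STAP_PROBE2(myapp, request_start, fd, len);
+       }
+
+   The probe compiles down to a single nop plus an entry in the
+   .note.stapsdt section, which can be listed with "readelf -n". */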
+
+
+
+#ifdef __ASSEMBLER__
+# define _SDT_PROBE(provider, name, n, arglist) \
+ _SDT_ASM_BODY(provider, name, _SDT_ASM_SUBSTR_1, (_SDT_DEPAREN_##n arglist)) \
+ _SDT_ASM_BASE
+# define _SDT_ASM_1(x) x;
+# define _SDT_ASM_2(a, b) a,b;
+# define _SDT_ASM_3(a, b, c) a,b,c;
+# define _SDT_ASM_5(a, b, c, d, e) a,b,c,d,e;
+# define _SDT_ASM_STRING_1(x) .asciz #x;
+# define _SDT_ASM_SUBSTR_1(x) .ascii #x;
+# define _SDT_DEPAREN_0() /* empty */
+# define _SDT_DEPAREN_1(a) a
+# define _SDT_DEPAREN_2(a,b) a b
+# define _SDT_DEPAREN_3(a,b,c) a b c
+# define _SDT_DEPAREN_4(a,b,c,d) a b c d
+# define _SDT_DEPAREN_5(a,b,c,d,e) a b c d e
+# define _SDT_DEPAREN_6(a,b,c,d,e,f) a b c d e f
+# define _SDT_DEPAREN_7(a,b,c,d,e,f,g) a b c d e f g
+# define _SDT_DEPAREN_8(a,b,c,d,e,f,g,h) a b c d e f g h
+# define _SDT_DEPAREN_9(a,b,c,d,e,f,g,h,i) a b c d e f g h i
+# define _SDT_DEPAREN_10(a,b,c,d,e,f,g,h,i,j) a b c d e f g h i j
+# define _SDT_DEPAREN_11(a,b,c,d,e,f,g,h,i,j,k) a b c d e f g h i j k
+# define _SDT_DEPAREN_12(a,b,c,d,e,f,g,h,i,j,k,l) a b c d e f g h i j k l
+#else
+#if defined _SDT_HAS_SEMAPHORES
+#define _SDT_NOTE_SEMAPHORE_USE(provider, name) \
+ __asm__ __volatile__ ("" :: "m" (provider##_##name##_semaphore));
+#else
+#define _SDT_NOTE_SEMAPHORE_USE(provider, name)
+#endif
+
+# define _SDT_PROBE(provider, name, n, arglist) \
+ do { \
+ _SDT_NOTE_SEMAPHORE_USE(provider, name); \
+ __asm__ __volatile__ (_SDT_ASM_BODY(provider, name, _SDT_ASM_ARGS, (n)) \
+ :: _SDT_ASM_OPERANDS_##n arglist); \
+ __asm__ __volatile__ (_SDT_ASM_BASE); \
+ } while (0)
+# define _SDT_S(x) #x
+# define _SDT_ASM_1(x) _SDT_S(x) "\n"
+# define _SDT_ASM_2(a, b) _SDT_S(a) "," _SDT_S(b) "\n"
+# define _SDT_ASM_3(a, b, c) _SDT_S(a) "," _SDT_S(b) "," \
+ _SDT_S(c) "\n"
+# define _SDT_ASM_5(a, b, c, d, e) _SDT_S(a) "," _SDT_S(b) "," \
+ _SDT_S(c) "," _SDT_S(d) "," \
+ _SDT_S(e) "\n"
+# define _SDT_ASM_ARGS(n) _SDT_ASM_TEMPLATE_##n
+# define _SDT_ASM_STRING_1(x) _SDT_ASM_1(.asciz #x)
+# define _SDT_ASM_SUBSTR_1(x) _SDT_ASM_1(.ascii #x)
+
+# define _SDT_ARGFMT(no) _SDT_ASM_1(_SDT_SIGN %n[_SDT_S##no]) \
+ _SDT_ASM_1(_SDT_SIZE %n[_SDT_S##no]) \
+ _SDT_ASM_1(_SDT_TYPE %n[_SDT_S##no]) \
+ _SDT_ASM_SUBSTR(_SDT_ARGTMPL(_SDT_A##no))
+
+
+# ifndef STAP_SDT_ARG_CONSTRAINT
+# if defined __powerpc__
+# define STAP_SDT_ARG_CONSTRAINT nZr
+# elif defined __arm__
+# define STAP_SDT_ARG_CONSTRAINT g
+# else
+# define STAP_SDT_ARG_CONSTRAINT nor
+# endif
+# endif
+
+# define _SDT_STRINGIFY(x) #x
+# define _SDT_ARG_CONSTRAINT_STRING(x) _SDT_STRINGIFY(x)
+/* _SDT_S encodes the size and type as 0xSSTT, which is decoded by the
+   assembler macros _SDT_SIZE and _SDT_TYPE. */
+# define _SDT_ARG(n, x) \
+ [_SDT_S##n] "n" ((_SDT_ARGSIGNED (x) ? (int)-1 : 1) * (-(((int) _SDT_ARGSIZE (x)) << 8) + (-(0x7f & __builtin_classify_type (x))))), \
+ [_SDT_A##n] _SDT_ARG_CONSTRAINT_STRING (STAP_SDT_ARG_CONSTRAINT) (_SDT_ARGVAL (x))
+#endif
+#define _SDT_ASM_STRING(x) _SDT_ASM_STRING_1(x)
+#define _SDT_ASM_SUBSTR(x) _SDT_ASM_SUBSTR_1(x)
+
+#define _SDT_ARGARRAY(x) (__builtin_classify_type (x) == 14 \
+ || __builtin_classify_type (x) == 5)
+
+#ifdef __cplusplus
+# define _SDT_ARGSIGNED(x) (!_SDT_ARGARRAY (x) \
+ && __sdt_type<__typeof (x)>::__sdt_signed)
+# define _SDT_ARGSIZE(x) (_SDT_ARGARRAY (x) \
+ ? sizeof (void *) : sizeof (x))
+# define _SDT_ARGVAL(x) (x)
+
+# include <cstddef>
+
+template<typename __sdt_T>
+struct __sdt_type
+{
+ static const bool __sdt_signed = false;
+};
+
+#define __SDT_ALWAYS_SIGNED(T) \
+template<> struct __sdt_type<T> { static const bool __sdt_signed = true; };
+#define __SDT_COND_SIGNED(T,CT) \
+template<> struct __sdt_type<T> { static const bool __sdt_signed = ((CT)(-1) < 1); };
+__SDT_ALWAYS_SIGNED(signed char)
+__SDT_ALWAYS_SIGNED(short)
+__SDT_ALWAYS_SIGNED(int)
+__SDT_ALWAYS_SIGNED(long)
+__SDT_ALWAYS_SIGNED(long long)
+__SDT_ALWAYS_SIGNED(volatile signed char)
+__SDT_ALWAYS_SIGNED(volatile short)
+__SDT_ALWAYS_SIGNED(volatile int)
+__SDT_ALWAYS_SIGNED(volatile long)
+__SDT_ALWAYS_SIGNED(volatile long long)
+__SDT_ALWAYS_SIGNED(const signed char)
+__SDT_ALWAYS_SIGNED(const short)
+__SDT_ALWAYS_SIGNED(const int)
+__SDT_ALWAYS_SIGNED(const long)
+__SDT_ALWAYS_SIGNED(const long long)
+__SDT_ALWAYS_SIGNED(const volatile signed char)
+__SDT_ALWAYS_SIGNED(const volatile short)
+__SDT_ALWAYS_SIGNED(const volatile int)
+__SDT_ALWAYS_SIGNED(const volatile long)
+__SDT_ALWAYS_SIGNED(const volatile long long)
+__SDT_COND_SIGNED(char, char)
+__SDT_COND_SIGNED(wchar_t, wchar_t)
+__SDT_COND_SIGNED(volatile char, char)
+__SDT_COND_SIGNED(volatile wchar_t, wchar_t)
+__SDT_COND_SIGNED(const char, char)
+__SDT_COND_SIGNED(const wchar_t, wchar_t)
+__SDT_COND_SIGNED(const volatile char, char)
+__SDT_COND_SIGNED(const volatile wchar_t, wchar_t)
+#if defined (__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
+/* __SDT_COND_SIGNED(char16_t) */
+/* __SDT_COND_SIGNED(char32_t) */
+#endif
+
+template<typename __sdt_E>
+struct __sdt_type<__sdt_E[]> : public __sdt_type<__sdt_E *> {};
+
+template<typename __sdt_E, size_t __sdt_N>
+struct __sdt_type<__sdt_E[__sdt_N]> : public __sdt_type<__sdt_E *> {};
+
+#elif !defined(__ASSEMBLER__)
+__extension__ extern unsigned long long __sdt_unsp;
+# define _SDT_ARGINTTYPE(x) \
+ __typeof (__builtin_choose_expr (((__builtin_classify_type (x) \
+ + 3) & -4) == 4, (x), 0U))
+# define _SDT_ARGSIGNED(x) \
+ (!__extension__ \
+ (__builtin_constant_p ((((unsigned long long) \
+ (_SDT_ARGINTTYPE (x)) __sdt_unsp) \
+ & ((unsigned long long)1 << (sizeof (unsigned long long) \
+ * __CHAR_BIT__ - 1))) == 0) \
+ || (_SDT_ARGINTTYPE (x)) -1 > (_SDT_ARGINTTYPE (x)) 0))
+# define _SDT_ARGSIZE(x) \
+ (_SDT_ARGARRAY (x) ? sizeof (void *) : sizeof (x))
+# define _SDT_ARGVAL(x) (x)
+#endif
+
+#if defined __powerpc__ || defined __powerpc64__
+# define _SDT_ARGTMPL(id) %I[id]%[id]
+#elif defined __i386__
+# define _SDT_ARGTMPL(id) %k[id] /* gcc.gnu.org/PR80115 sourceware.org/PR24541 */
+#else
+# define _SDT_ARGTMPL(id) %[id]
+#endif
+
+/* NB: gdb PR24541 highlighted an unspecified corner of the sdt.h
+ operand note format.
+
+ The named register may be a longer or shorter (!) alias for the
+ storage where the value in question is found. For example, on
+   i386, a 64-bit value may be put in a register pair, and the register
+   name stored would identify just one of them. Previously, gcc was
+   asked to emit %w[id] (a 16-bit alias of some registers holding
+   operands), even when a wider 32-bit value was used.
+
+ Bottom line: the byte-width given before the @ sign governs. If
+ there is a mismatch between that width and that of the named
+ register, then a sys/sdt.h note consumer may need to employ
+ architecture-specific heuristics to figure out where the compiler
+ has actually put the complete value.
+*/
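+
+/* For example, an argument may be recorded in the note as "-4@%eax": a
+   signed 4-byte value currently held in %eax. */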
+
+#ifdef __LP64__
+# define _SDT_ASM_ADDR .8byte
+#else
+# define _SDT_ASM_ADDR .4byte
+#endif
+
+/* The ia64 and s390 nop instructions take an argument. */
+#if defined(__ia64__) || defined(__s390__) || defined(__s390x__)
+#define _SDT_NOP nop 0
+#else
+#define _SDT_NOP nop
+#endif
+
+#define _SDT_NOTE_NAME "stapsdt"
+#define _SDT_NOTE_TYPE 3
+
+/* If the assembler supports the necessary feature, then we can play
+ nice with code in COMDAT sections, which comes up in C++ code.
+ Without that assembler support, some combinations of probe placements
+ in certain kinds of C++ code may produce link-time errors. */
+#include "sdt-config.h"
+#if _SDT_ASM_SECTION_AUTOGROUP_SUPPORT
+# define _SDT_ASM_AUTOGROUP "?"
+#else
+# define _SDT_ASM_AUTOGROUP ""
+#endif
+
+#define _SDT_DEF_MACROS \
+ _SDT_ASM_1(.altmacro) \
+ _SDT_ASM_1(.macro _SDT_SIGN x) \
+ _SDT_ASM_3(.pushsection .note.stapsdt,"","note") \
+ _SDT_ASM_1(.iflt \\x) \
+ _SDT_ASM_1(.ascii "-") \
+ _SDT_ASM_1(.endif) \
+ _SDT_ASM_1(.popsection) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_SIZE_ x) \
+ _SDT_ASM_3(.pushsection .note.stapsdt,"","note") \
+ _SDT_ASM_1(.ascii "\x") \
+ _SDT_ASM_1(.popsection) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_SIZE x) \
+ _SDT_ASM_1(_SDT_SIZE_ %%((-(-\\x*((-\\x>0)-(-\\x<0))))>>8)) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_TYPE_ x) \
+ _SDT_ASM_3(.pushsection .note.stapsdt,"","note") \
+ _SDT_ASM_2(.ifc 8,\\x) \
+ _SDT_ASM_1(.ascii "f") \
+ _SDT_ASM_1(.endif) \
+ _SDT_ASM_1(.ascii "@") \
+ _SDT_ASM_1(.popsection) \
+ _SDT_ASM_1(.endm) \
+ _SDT_ASM_1(.macro _SDT_TYPE x) \
+ _SDT_ASM_1(_SDT_TYPE_ %%((\\x)&(0xff))) \
+ _SDT_ASM_1(.endm)
+
+#define _SDT_UNDEF_MACROS \
+ _SDT_ASM_1(.purgem _SDT_SIGN) \
+ _SDT_ASM_1(.purgem _SDT_SIZE_) \
+ _SDT_ASM_1(.purgem _SDT_SIZE) \
+ _SDT_ASM_1(.purgem _SDT_TYPE_) \
+ _SDT_ASM_1(.purgem _SDT_TYPE)
+
+#define _SDT_ASM_BODY(provider, name, pack_args, args, ...) \
+ _SDT_DEF_MACROS \
+ _SDT_ASM_1(990: _SDT_NOP) \
+ _SDT_ASM_3( .pushsection .note.stapsdt,_SDT_ASM_AUTOGROUP,"note") \
+ _SDT_ASM_1( .balign 4) \
+ _SDT_ASM_3( .4byte 992f-991f, 994f-993f, _SDT_NOTE_TYPE) \
+ _SDT_ASM_1(991: .asciz _SDT_NOTE_NAME) \
+ _SDT_ASM_1(992: .balign 4) \
+ _SDT_ASM_1(993: _SDT_ASM_ADDR 990b) \
+ _SDT_ASM_1( _SDT_ASM_ADDR _.stapsdt.base) \
+ _SDT_SEMAPHORE(provider,name) \
+ _SDT_ASM_STRING(provider) \
+ _SDT_ASM_STRING(name) \
+ pack_args args \
+ _SDT_ASM_SUBSTR(\x00) \
+ _SDT_UNDEF_MACROS \
+ _SDT_ASM_1(994: .balign 4) \
+ _SDT_ASM_1( .popsection)
+
+#define _SDT_ASM_BASE \
+ _SDT_ASM_1(.ifndef _.stapsdt.base) \
+ _SDT_ASM_5( .pushsection .stapsdt.base,"aG","progbits", \
+ .stapsdt.base,comdat) \
+ _SDT_ASM_1( .weak _.stapsdt.base) \
+ _SDT_ASM_1( .hidden _.stapsdt.base) \
+ _SDT_ASM_1( _.stapsdt.base: .space 1) \
+ _SDT_ASM_2( .size _.stapsdt.base, 1) \
+ _SDT_ASM_1( .popsection) \
+ _SDT_ASM_1(.endif)
+
+#if defined _SDT_HAS_SEMAPHORES
+#define _SDT_SEMAPHORE(p,n) \
+ _SDT_ASM_1( _SDT_ASM_ADDR p##_##n##_semaphore)
+#else
+#define _SDT_SEMAPHORE(p,n) _SDT_ASM_1( _SDT_ASM_ADDR 0)
+#endif
+
+#define _SDT_ASM_BLANK _SDT_ASM_SUBSTR(\x20)
+#define _SDT_ASM_TEMPLATE_0 /* no arguments */
+#define _SDT_ASM_TEMPLATE_1 _SDT_ARGFMT(1)
+#define _SDT_ASM_TEMPLATE_2 _SDT_ASM_TEMPLATE_1 _SDT_ASM_BLANK _SDT_ARGFMT(2)
+#define _SDT_ASM_TEMPLATE_3 _SDT_ASM_TEMPLATE_2 _SDT_ASM_BLANK _SDT_ARGFMT(3)
+#define _SDT_ASM_TEMPLATE_4 _SDT_ASM_TEMPLATE_3 _SDT_ASM_BLANK _SDT_ARGFMT(4)
+#define _SDT_ASM_TEMPLATE_5 _SDT_ASM_TEMPLATE_4 _SDT_ASM_BLANK _SDT_ARGFMT(5)
+#define _SDT_ASM_TEMPLATE_6 _SDT_ASM_TEMPLATE_5 _SDT_ASM_BLANK _SDT_ARGFMT(6)
+#define _SDT_ASM_TEMPLATE_7 _SDT_ASM_TEMPLATE_6 _SDT_ASM_BLANK _SDT_ARGFMT(7)
+#define _SDT_ASM_TEMPLATE_8 _SDT_ASM_TEMPLATE_7 _SDT_ASM_BLANK _SDT_ARGFMT(8)
+#define _SDT_ASM_TEMPLATE_9 _SDT_ASM_TEMPLATE_8 _SDT_ASM_BLANK _SDT_ARGFMT(9)
+#define _SDT_ASM_TEMPLATE_10 _SDT_ASM_TEMPLATE_9 _SDT_ASM_BLANK _SDT_ARGFMT(10)
+#define _SDT_ASM_TEMPLATE_11 _SDT_ASM_TEMPLATE_10 _SDT_ASM_BLANK _SDT_ARGFMT(11)
+#define _SDT_ASM_TEMPLATE_12 _SDT_ASM_TEMPLATE_11 _SDT_ASM_BLANK _SDT_ARGFMT(12)
+#define _SDT_ASM_OPERANDS_0() [__sdt_dummy] "g" (0)
+#define _SDT_ASM_OPERANDS_1(arg1) _SDT_ARG(1, arg1)
+#define _SDT_ASM_OPERANDS_2(arg1, arg2) \
+ _SDT_ASM_OPERANDS_1(arg1), _SDT_ARG(2, arg2)
+#define _SDT_ASM_OPERANDS_3(arg1, arg2, arg3) \
+ _SDT_ASM_OPERANDS_2(arg1, arg2), _SDT_ARG(3, arg3)
+#define _SDT_ASM_OPERANDS_4(arg1, arg2, arg3, arg4) \
+ _SDT_ASM_OPERANDS_3(arg1, arg2, arg3), _SDT_ARG(4, arg4)
+#define _SDT_ASM_OPERANDS_5(arg1, arg2, arg3, arg4, arg5) \
+ _SDT_ASM_OPERANDS_4(arg1, arg2, arg3, arg4), _SDT_ARG(5, arg5)
+#define _SDT_ASM_OPERANDS_6(arg1, arg2, arg3, arg4, arg5, arg6) \
+ _SDT_ASM_OPERANDS_5(arg1, arg2, arg3, arg4, arg5), _SDT_ARG(6, arg6)
+#define _SDT_ASM_OPERANDS_7(arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
+ _SDT_ASM_OPERANDS_6(arg1, arg2, arg3, arg4, arg5, arg6), _SDT_ARG(7, arg7)
+#define _SDT_ASM_OPERANDS_8(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) \
+ _SDT_ASM_OPERANDS_7(arg1, arg2, arg3, arg4, arg5, arg6, arg7), \
+ _SDT_ARG(8, arg8)
+#define _SDT_ASM_OPERANDS_9(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9) \
+ _SDT_ASM_OPERANDS_8(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8), \
+ _SDT_ARG(9, arg9)
+#define _SDT_ASM_OPERANDS_10(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10) \
+ _SDT_ASM_OPERANDS_9(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9), \
+ _SDT_ARG(10, arg10)
+#define _SDT_ASM_OPERANDS_11(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
+ _SDT_ASM_OPERANDS_10(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10), \
+ _SDT_ARG(11, arg11)
+#define _SDT_ASM_OPERANDS_12(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12) \
+ _SDT_ASM_OPERANDS_11(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11), \
+ _SDT_ARG(12, arg12)
+
+/* These macros can be used in C, C++, or assembly code.
+ In assembly code the arguments should use normal assembly operand syntax. */
+
+#define STAP_PROBE(provider, name) \
+ _SDT_PROBE(provider, name, 0, ())
+#define STAP_PROBE1(provider, name, arg1) \
+ _SDT_PROBE(provider, name, 1, (arg1))
+#define STAP_PROBE2(provider, name, arg1, arg2) \
+ _SDT_PROBE(provider, name, 2, (arg1, arg2))
+#define STAP_PROBE3(provider, name, arg1, arg2, arg3) \
+ _SDT_PROBE(provider, name, 3, (arg1, arg2, arg3))
+#define STAP_PROBE4(provider, name, arg1, arg2, arg3, arg4) \
+ _SDT_PROBE(provider, name, 4, (arg1, arg2, arg3, arg4))
+#define STAP_PROBE5(provider, name, arg1, arg2, arg3, arg4, arg5) \
+ _SDT_PROBE(provider, name, 5, (arg1, arg2, arg3, arg4, arg5))
+#define STAP_PROBE6(provider, name, arg1, arg2, arg3, arg4, arg5, arg6) \
+ _SDT_PROBE(provider, name, 6, (arg1, arg2, arg3, arg4, arg5, arg6))
+#define STAP_PROBE7(provider, name, arg1, arg2, arg3, arg4, arg5, arg6, arg7) \
+ _SDT_PROBE(provider, name, 7, (arg1, arg2, arg3, arg4, arg5, arg6, arg7))
+#define STAP_PROBE8(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8) \
+ _SDT_PROBE(provider, name, 8, (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8))
+#define STAP_PROBE9(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9)\
+ _SDT_PROBE(provider, name, 9, (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9))
+#define STAP_PROBE10(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10) \
+ _SDT_PROBE(provider, name, 10, \
+ (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10))
+#define STAP_PROBE11(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11) \
+ _SDT_PROBE(provider, name, 11, \
+ (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11))
+#define STAP_PROBE12(provider,name,arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12) \
+ _SDT_PROBE(provider, name, 12, \
+ (arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10,arg11,arg12))
+
+/* This STAP_PROBEV macro can be used in variadic scenarios, where the
+ number of probe arguments is not known until compile time. Since
+ variadic macro support may vary with compiler options, you must
+ pre-#define SDT_USE_VARIADIC to enable this type of probe.
+
+ The trick to count __VA_ARGS__ was inspired by this post by
+ Laurent Deniau <laurent.deniau@cern.ch>:
+ http://groups.google.com/group/comp.std.c/msg/346fc464319b1ee5
+
+ Note that our _SDT_NARG is called with an extra 0 arg that's not
+ counted, so we don't have to worry about the behavior of macros
+ called without any arguments. */
+
+#define _SDT_NARG(...) __SDT_NARG(__VA_ARGS__, 12,11,10,9,8,7,6,5,4,3,2,1,0)
+#define __SDT_NARG(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12, N, ...) N
+#ifdef SDT_USE_VARIADIC
+#define _SDT_PROBE_N(provider, name, N, ...) \
+ _SDT_PROBE(provider, name, N, (__VA_ARGS__))
+#define STAP_PROBEV(provider, name, ...) \
+ _SDT_PROBE_N(provider, name, _SDT_NARG(0, ##__VA_ARGS__), ##__VA_ARGS__)
+#endif
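+
+/* Illustrative expansion (hypothetical provider/probe names):
+
+     #define SDT_USE_VARIADIC
+     #include <sys/sdt.h>
+     ...
+     STAP_PROBEV(myapp, event, a, b, c);
+
+   _SDT_NARG(0, a, b, c) evaluates to 3, so the call is equivalent to
+   STAP_PROBE3(myapp, event, a, b, c). */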
+
+/* These macros are for use in asm statements. You must compile
+ with -std=gnu99 or -std=c99 to use the STAP_PROBE_ASM macro.
+
+ The STAP_PROBE_ASM macro generates a quoted string to be used in the
+ template portion of the asm statement, concatenated with strings that
+ contain the actual assembly code around the probe site.
+
+ For example:
+
+ asm ("before\n"
+ STAP_PROBE_ASM(provider, fooprobe, %eax 4(%esi))
+ "after");
+
+ emits the assembly code for "before\nafter", with a probe in between.
+ The probe arguments are the %eax register, and the value of the memory
+ word located 4 bytes past the address in the %esi register. Note that
+ because this is a simple asm, not a GNU C extended asm statement, these
+ % characters do not need to be doubled to generate literal %reg names.
+
+ In a GNU C extended asm statement, the probe arguments can be specified
+ using the macro STAP_PROBE_ASM_TEMPLATE(n) for n arguments. The paired
+ macro STAP_PROBE_ASM_OPERANDS gives the C values of these probe arguments,
+ and appears in the input operand list of the asm statement. For example:
+
+ asm ("someinsn %0,%1\n" // %0 is output operand, %1 is input operand
+ STAP_PROBE_ASM(provider, fooprobe, STAP_PROBE_ASM_TEMPLATE(3))
+ "otherinsn %[namedarg]"
+ : "r" (outvar)
+ : "g" (some_value), [namedarg] "i" (1234),
+ STAP_PROBE_ASM_OPERANDS(3, some_value, some_ptr->field, 1234));
+
+ This is just like writing:
+
+     STAP_PROBE3(provider, fooprobe, some_value, some_ptr->field, 1234);
+
+ but the probe site is right between "someinsn" and "otherinsn".
+
+ The probe arguments in STAP_PROBE_ASM can be given as assembly
+ operands instead, even inside a GNU C extended asm statement.
+ Note that these can use operand templates like %0 or %[name],
+ and likewise they must write %%reg for a literal operand of %reg. */
+
+#define _SDT_ASM_BODY_1(p,n,...) _SDT_ASM_BODY(p,n,_SDT_ASM_SUBSTR,(__VA_ARGS__))
+#define _SDT_ASM_BODY_2(p,n,...) _SDT_ASM_BODY(p,n,/*_SDT_ASM_STRING */,__VA_ARGS__)
+#define _SDT_ASM_BODY_N2(p,n,no,...) _SDT_ASM_BODY_ ## no(p,n,__VA_ARGS__)
+#define _SDT_ASM_BODY_N1(p,n,no,...) _SDT_ASM_BODY_N2(p,n,no,__VA_ARGS__)
+#define _SDT_ASM_BODY_N(p,n,...) _SDT_ASM_BODY_N1(p,n,_SDT_NARG(0, __VA_ARGS__),__VA_ARGS__)
+
+#if __STDC_VERSION__ >= 199901L
+# define STAP_PROBE_ASM(provider, name, ...) \
+ _SDT_ASM_BODY_N(provider, name, __VA_ARGS__) \
+ _SDT_ASM_BASE
+# define STAP_PROBE_ASM_OPERANDS(n, ...) _SDT_ASM_OPERANDS_##n(__VA_ARGS__)
+#else
+# define STAP_PROBE_ASM(provider, name, args) \
+ _SDT_ASM_BODY(provider, name, /* _SDT_ASM_STRING */, (args)) \
+ _SDT_ASM_BASE
+#endif
+#define STAP_PROBE_ASM_TEMPLATE(n) _SDT_ASM_TEMPLATE_##n,"use _SDT_ASM_TEMPLATE_"
+
+
+/* DTrace compatible macro names. */
+#define DTRACE_PROBE(provider,probe) \
+ STAP_PROBE(provider,probe)
+#define DTRACE_PROBE1(provider,probe,parm1) \
+ STAP_PROBE1(provider,probe,parm1)
+#define DTRACE_PROBE2(provider,probe,parm1,parm2) \
+ STAP_PROBE2(provider,probe,parm1,parm2)
+#define DTRACE_PROBE3(provider,probe,parm1,parm2,parm3) \
+ STAP_PROBE3(provider,probe,parm1,parm2,parm3)
+#define DTRACE_PROBE4(provider,probe,parm1,parm2,parm3,parm4) \
+ STAP_PROBE4(provider,probe,parm1,parm2,parm3,parm4)
+#define DTRACE_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5) \
+ STAP_PROBE5(provider,probe,parm1,parm2,parm3,parm4,parm5)
+#define DTRACE_PROBE6(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6) \
+ STAP_PROBE6(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6)
+#define DTRACE_PROBE7(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7) \
+ STAP_PROBE7(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7)
+#define DTRACE_PROBE8(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8) \
+ STAP_PROBE8(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8)
+#define DTRACE_PROBE9(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9) \
+ STAP_PROBE9(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9)
+#define DTRACE_PROBE10(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10) \
+ STAP_PROBE10(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10)
+#define DTRACE_PROBE11(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11) \
+ STAP_PROBE11(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11)
+#define DTRACE_PROBE12(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11,parm12) \
+ STAP_PROBE12(provider,probe,parm1,parm2,parm3,parm4,parm5,parm6,parm7,parm8,parm9,parm10,parm11,parm12)
+
+
+#endif /* sys/sdt.h */
diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
index 6bf21e47882a..a6410bebe603 100755
--- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py
+++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py
@@ -58,7 +58,7 @@ class BlockParser(object):
class ArrayParser(BlockParser):
"""
- A parser for extracting dicionaries of values from some BPF-related arrays.
+ A parser for extracting a set of values from some BPF-related arrays.
@reader: a pointer to the open file to parse
@array_name: name of the array to parse
"""
@@ -66,7 +66,7 @@ class ArrayParser(BlockParser):
def __init__(self, reader, array_name):
self.array_name = array_name
- self.start_marker = re.compile(f'(static )?const char \* const {self.array_name}\[.*\] = {{\n')
+ self.start_marker = re.compile(f'(static )?const bool {self.array_name}\[.*\] = {{\n')
super().__init__(reader)
def search_block(self):
@@ -80,15 +80,15 @@ class ArrayParser(BlockParser):
Parse a block and return data as a dictionary. Items to extract must be
on separate lines in the file.
"""
- pattern = re.compile('\[(BPF_\w*)\]\s*= "(.*)",?$')
- entries = {}
+ pattern = re.compile('\[(BPF_\w*)\]\s*= (true|false),?$')
+ entries = set()
while True:
line = self.reader.readline()
if line == '' or re.match(self.end_marker, line):
break
capture = pattern.search(line)
if capture:
- entries[capture.group(1)] = capture.group(2)
+ entries |= {capture.group(1)}
return entries
class InlineListParser(BlockParser):
@@ -115,7 +115,7 @@ class InlineListParser(BlockParser):
class FileExtractor(object):
"""
A generic reader for extracting data from a given file. This class contains
- several helper methods that wrap arround parser objects to extract values
+ several helper methods that wrap around parser objects to extract values
from different structures.
This class does not offer a way to set a filename, which is expected to be
defined in children classes.
@@ -139,21 +139,19 @@ class FileExtractor(object):
def get_types_from_array(self, array_name):
"""
- Search for and parse an array associating names to BPF_* enum members,
- for example:
+ Search for and parse a list of allowed BPF_* enum members, for example:
- const char * const prog_type_name[] = {
- [BPF_PROG_TYPE_UNSPEC] = "unspec",
- [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
- [BPF_PROG_TYPE_KPROBE] = "kprobe",
+ const bool prog_type_name[] = {
+ [BPF_PROG_TYPE_UNSPEC] = true,
+ [BPF_PROG_TYPE_SOCKET_FILTER] = true,
+ [BPF_PROG_TYPE_KPROBE] = true,
};
- Return a dictionary with the enum member names as keys and the
- associated names as values, for example:
+ Return a set of the enum members, for example:
- {'BPF_PROG_TYPE_UNSPEC': 'unspec',
- 'BPF_PROG_TYPE_SOCKET_FILTER': 'socket_filter',
- 'BPF_PROG_TYPE_KPROBE': 'kprobe'}
+ {'BPF_PROG_TYPE_UNSPEC',
+ 'BPF_PROG_TYPE_SOCKET_FILTER',
+ 'BPF_PROG_TYPE_KPROBE'}
@array_name: name of the array to parse
"""
@@ -180,12 +178,33 @@ class FileExtractor(object):
@enum_name: name of the enum to parse
"""
start_marker = re.compile(f'enum {enum_name} {{\n')
- pattern = re.compile('^\s*(BPF_\w+),?$')
+ pattern = re.compile('^\s*(BPF_\w+),?(\s+/\*.*\*/)?$')
end_marker = re.compile('^};')
parser = BlockParser(self.reader)
parser.search_block(start_marker)
return parser.parse(pattern, end_marker)
+ def make_enum_map(self, names, enum_prefix):
+ """
+ Search for and parse an enum containing BPF_* members, just as get_enum
+ does. However, instead of just returning a set of the variant names,
+        also generate a textual representation for each by removing a given
+        prefix (which every name is assumed to carry) and lowercasing the
+        remainder. Then return a
+ dict mapping from name to textual representation.
+
+        @names: a set of enum member names, e.g. as retrieved by get_enum
+ @enum_prefix: the prefix to remove from each of the variants to infer
+ textual representation
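+
+        Example (hypothetical input):
+            make_enum_map({'BPF_CGROUP_INET_INGRESS'}, 'BPF_') returns
+            {'BPF_CGROUP_INET_INGRESS': 'cgroup_inet_ingress'}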
+ """
+ mapping = {}
+ for name in names:
+ if not name.startswith(enum_prefix):
+ raise Exception(f"enum variant {name} does not start with {enum_prefix}")
+ text = name[len(enum_prefix):].lower()
+ mapping[name] = text
+
+ return mapping
+
def __get_description_list(self, start_marker, pattern, end_marker):
parser = InlineListParser(self.reader)
parser.search_block(start_marker)
@@ -333,11 +352,9 @@ class ProgFileExtractor(SourceFileExtractor):
"""
filename = os.path.join(BPFTOOL_DIR, 'prog.c')
- def get_prog_types(self):
- return self.get_types_from_array('prog_type_name')
-
def get_attach_types(self):
- return self.get_types_from_array('attach_type_strings')
+ types = self.get_types_from_array('attach_types')
+ return self.make_enum_map(types, 'BPF_')
def get_prog_attach_help(self):
return self.get_help_list('ATTACH_TYPE')
@@ -348,9 +365,6 @@ class MapFileExtractor(SourceFileExtractor):
"""
filename = os.path.join(BPFTOOL_DIR, 'map.c')
- def get_map_types(self):
- return self.get_types_from_array('map_type_name')
-
def get_map_help(self):
return self.get_help_list('TYPE')
@@ -363,30 +377,6 @@ class CgroupFileExtractor(SourceFileExtractor):
def get_prog_attach_help(self):
return self.get_help_list('ATTACH_TYPE')
-class CommonFileExtractor(SourceFileExtractor):
- """
- An extractor for bpftool's common.c.
- """
- filename = os.path.join(BPFTOOL_DIR, 'common.c')
-
- def __init__(self):
- super().__init__()
- self.attach_types = {}
-
- def get_attach_types(self):
- if not self.attach_types:
- self.attach_types = self.get_types_from_array('attach_type_name')
- return self.attach_types
-
- def get_cgroup_attach_types(self):
- if not self.attach_types:
- self.get_attach_types()
- cgroup_types = {}
- for (key, value) in self.attach_types.items():
- if key.find('BPF_CGROUP') != -1:
- cgroup_types[key] = value
- return cgroup_types
-
class GenericSourceExtractor(SourceFileExtractor):
"""
An extractor for generic source code files.
@@ -403,14 +393,28 @@ class BpfHeaderExtractor(FileExtractor):
"""
filename = os.path.join(INCLUDE_DIR, 'uapi/linux/bpf.h')
+ def __init__(self):
+ super().__init__()
+ self.attach_types = {}
+
def get_prog_types(self):
return self.get_enum('bpf_prog_type')
- def get_map_types(self):
- return self.get_enum('bpf_map_type')
+ def get_map_type_map(self):
+ names = self.get_enum('bpf_map_type')
+ return self.make_enum_map(names, 'BPF_MAP_TYPE_')
- def get_attach_types(self):
- return self.get_enum('bpf_attach_type')
+ def get_attach_type_map(self):
+ if not self.attach_types:
+ names = self.get_enum('bpf_attach_type')
+ self.attach_types = self.make_enum_map(names, 'BPF_')
+ return self.attach_types
+
+ def get_cgroup_attach_type_map(self):
+ if not self.attach_types:
+ self.get_attach_type_map()
+ return {name: text for name, text in self.attach_types.items()
+ if name.startswith('BPF_CGROUP')}
class ManPageExtractor(FileExtractor):
"""
@@ -467,12 +471,6 @@ class BashcompExtractor(FileExtractor):
def get_prog_attach_types(self):
return self.get_bashcomp_list('BPFTOOL_PROG_ATTACH_TYPES')
- def get_map_types(self):
- return self.get_bashcomp_list('BPFTOOL_MAP_CREATE_TYPES')
-
- def get_cgroup_attach_types(self):
- return self.get_bashcomp_list('BPFTOOL_CGROUP_ATTACH_TYPES')
-
def verify(first_set, second_set, message):
"""
Print all values that differ between two sets.
@@ -495,21 +493,12 @@ def main():
""")
args = argParser.parse_args()
- # Map types (enum)
-
bpf_info = BpfHeaderExtractor()
- ref = bpf_info.get_map_types()
-
- map_info = MapFileExtractor()
- source_map_items = map_info.get_map_types()
- map_types_enum = set(source_map_items.keys())
-
- verify(ref, map_types_enum,
- f'Comparing BPF header (enum bpf_map_type) and {MapFileExtractor.filename} (map_type_name):')
# Map types (names)
- source_map_types = set(source_map_items.values())
+ map_info = MapFileExtractor()
+ source_map_types = set(bpf_info.get_map_type_map().values())
source_map_types.discard('unspec')
help_map_types = map_info.get_map_help()
@@ -521,41 +510,16 @@ def main():
man_map_types = man_map_info.get_map_types()
man_map_info.close()
- bashcomp_info = BashcompExtractor()
- bashcomp_map_types = bashcomp_info.get_map_types()
-
verify(source_map_types, help_map_types,
- f'Comparing {MapFileExtractor.filename} (map_type_name) and {MapFileExtractor.filename} (do_help() TYPE):')
+ f'Comparing {BpfHeaderExtractor.filename} (bpf_map_type) and {MapFileExtractor.filename} (do_help() TYPE):')
verify(source_map_types, man_map_types,
- f'Comparing {MapFileExtractor.filename} (map_type_name) and {ManMapExtractor.filename} (TYPE):')
+ f'Comparing {BpfHeaderExtractor.filename} (bpf_map_type) and {ManMapExtractor.filename} (TYPE):')
verify(help_map_options, man_map_options,
f'Comparing {MapFileExtractor.filename} (do_help() OPTIONS) and {ManMapExtractor.filename} (OPTIONS):')
- verify(source_map_types, bashcomp_map_types,
- f'Comparing {MapFileExtractor.filename} (map_type_name) and {BashcompExtractor.filename} (BPFTOOL_MAP_CREATE_TYPES):')
-
- # Program types (enum)
-
- ref = bpf_info.get_prog_types()
-
- prog_info = ProgFileExtractor()
- prog_types = set(prog_info.get_prog_types().keys())
-
- verify(ref, prog_types,
- f'Comparing BPF header (enum bpf_prog_type) and {ProgFileExtractor.filename} (prog_type_name):')
-
- # Attach types (enum)
-
- ref = bpf_info.get_attach_types()
- bpf_info.close()
-
- common_info = CommonFileExtractor()
- attach_types = common_info.get_attach_types()
-
- verify(ref, attach_types,
- f'Comparing BPF header (enum bpf_attach_type) and {CommonFileExtractor.filename} (attach_type_name):')
# Attach types (names)
+ prog_info = ProgFileExtractor()
source_prog_attach_types = set(prog_info.get_attach_types().values())
help_prog_attach_types = prog_info.get_prog_attach_help()
@@ -567,22 +531,23 @@ def main():
man_prog_attach_types = man_prog_info.get_attach_types()
man_prog_info.close()
- bashcomp_info.reset_read() # We stopped at map types, rewind
+
+ bashcomp_info = BashcompExtractor()
bashcomp_prog_attach_types = bashcomp_info.get_prog_attach_types()
+ bashcomp_info.close()
verify(source_prog_attach_types, help_prog_attach_types,
- f'Comparing {ProgFileExtractor.filename} (attach_type_strings) and {ProgFileExtractor.filename} (do_help() ATTACH_TYPE):')
+ f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {ProgFileExtractor.filename} (do_help() ATTACH_TYPE):')
verify(source_prog_attach_types, man_prog_attach_types,
- f'Comparing {ProgFileExtractor.filename} (attach_type_strings) and {ManProgExtractor.filename} (ATTACH_TYPE):')
+ f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {ManProgExtractor.filename} (ATTACH_TYPE):')
verify(help_prog_options, man_prog_options,
f'Comparing {ProgFileExtractor.filename} (do_help() OPTIONS) and {ManProgExtractor.filename} (OPTIONS):')
verify(source_prog_attach_types, bashcomp_prog_attach_types,
- f'Comparing {ProgFileExtractor.filename} (attach_type_strings) and {BashcompExtractor.filename} (BPFTOOL_PROG_ATTACH_TYPES):')
+ f'Comparing {ProgFileExtractor.filename} (bpf_attach_type) and {BashcompExtractor.filename} (BPFTOOL_PROG_ATTACH_TYPES):')
# Cgroup attach types
-
- source_cgroup_attach_types = set(common_info.get_cgroup_attach_types().values())
- common_info.close()
+ source_cgroup_attach_types = set(bpf_info.get_cgroup_attach_type_map().values())
+ bpf_info.close()
cgroup_info = CgroupFileExtractor()
help_cgroup_attach_types = cgroup_info.get_prog_attach_help()
@@ -594,17 +559,12 @@ def main():
man_cgroup_attach_types = man_cgroup_info.get_attach_types()
man_cgroup_info.close()
- bashcomp_cgroup_attach_types = bashcomp_info.get_cgroup_attach_types()
- bashcomp_info.close()
-
verify(source_cgroup_attach_types, help_cgroup_attach_types,
- f'Comparing {CommonFileExtractor.filename} (attach_type_strings) and {CgroupFileExtractor.filename} (do_help() ATTACH_TYPE):')
+ f'Comparing {BpfHeaderExtractor.filename} (bpf_attach_type) and {CgroupFileExtractor.filename} (do_help() ATTACH_TYPE):')
verify(source_cgroup_attach_types, man_cgroup_attach_types,
- f'Comparing {CommonFileExtractor.filename} (attach_type_strings) and {ManCgroupExtractor.filename} (ATTACH_TYPE):')
+ f'Comparing {BpfHeaderExtractor.filename} (bpf_attach_type) and {ManCgroupExtractor.filename} (ATTACH_TYPE):')
verify(help_cgroup_options, man_cgroup_options,
f'Comparing {CgroupFileExtractor.filename} (do_help() OPTIONS) and {ManCgroupExtractor.filename} (OPTIONS):')
- verify(source_cgroup_attach_types, bashcomp_cgroup_attach_types,
- f'Comparing {CommonFileExtractor.filename} (attach_type_strings) and {BashcompExtractor.filename} (BPFTOOL_CGROUP_ATTACH_TYPES):')
# Options for remaining commands
diff --git a/tools/testing/selftests/bpf/test_btf.h b/tools/testing/selftests/bpf/test_btf.h
index 128989bed8b7..fb4f4714eeb4 100644
--- a/tools/testing/selftests/bpf/test_btf.h
+++ b/tools/testing/selftests/bpf/test_btf.h
@@ -4,6 +4,8 @@
#ifndef _TEST_BTF_H
#define _TEST_BTF_H
+#define BTF_END_RAW 0xdeadbeef
+
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
@@ -39,6 +41,7 @@
#define BTF_MEMBER_ENC(name, type, bits_offset) \
(name), (type), (bits_offset)
#define BTF_ENUM_ENC(name, val) (name), (val)
+#define BTF_ENUM64_ENC(name, val_lo32, val_hi32) (name), (val_lo32), (val_hi32)
#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
((bitfield_size) << 24 | (bits_offset))
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index d6a1be4d8020..0861ea60dcdd 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -6,7 +6,7 @@
#include <stdlib.h>
#include <sys/sysinfo.h>
-#include "bpf_rlimit.h"
+#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
@@ -44,13 +44,16 @@ int main(int argc, char **argv)
unsigned long long *percpu_value;
int cpu, nproc;
- nproc = get_nprocs_conf();
+ nproc = bpf_num_possible_cpus();
percpu_value = malloc(sizeof(*percpu_value) * nproc);
if (!percpu_value) {
printf("Not enough memory for per-cpu area (%d cpus)\n", nproc);
goto err;
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key),
sizeof(value), 0, NULL);
if (map_fd < 0) {
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index c299d3452695..7886265846a0 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -15,7 +15,6 @@
#include "cgroup_helpers.h"
#include "testing_helpers.h"
-#include "bpf_rlimit.h"
#define DEV_CGROUP_PROG "./dev_cgroup.o"
@@ -28,6 +27,9 @@ int main(int argc, char **argv)
int prog_fd, cgroup_fd;
__u32 prog_cnt;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index aa294612e0a7..c028d621c744 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -26,7 +26,6 @@
#include <bpf/bpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
struct tlpm_node {
struct tlpm_node *next;
@@ -409,16 +408,13 @@ static void test_lpm_ipaddr(void)
/* Test some lookups that should not match any entry */
inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
- assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -ENOENT);
close(map_fd_ipv4);
close(map_fd_ipv6);
@@ -475,18 +471,15 @@ static void test_lpm_delete(void)
/* remove non-existent node */
key->prefixlen = 32;
inet_pton(AF_INET, "10.0.0.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
key->prefixlen = 30; // unused prefix so far
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
key->prefixlen = 16; // same prefix as the root node
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
/* assert initial lookup */
key->prefixlen = 32;
@@ -531,8 +524,7 @@ static void test_lpm_delete(void)
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.128.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
close(map_fd);
}
@@ -553,8 +545,7 @@ static void test_lpm_get_next_key(void)
assert(map_fd >= 0);
/* empty tree. get_next_key should return ENOENT */
- assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -ENOENT);
/* get and verify the first key, get the second one should fail. */
key_p->prefixlen = 16;
@@ -566,8 +557,7 @@ static void test_lpm_get_next_key(void)
assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
key_p->data[1] == 168);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should get the first one in post order. */
key_p->prefixlen = 8;
@@ -591,8 +581,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total three) */
key_p->prefixlen = 24;
@@ -615,8 +604,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total four) */
key_p->prefixlen = 24;
@@ -644,8 +632,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total five) */
key_p->prefixlen = 28;
@@ -679,8 +666,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should return the first one in post order */
key_p->prefixlen = 22;
@@ -791,6 +777,9 @@ int main(void)
/* we want predictable, pseudo random tests */
srand(0xf00ba1);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
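+	/* Under LIBBPF_STRICT_ALL, libbpf map operations return -errno
+	 * directly on failure (instead of -1 with errno set), hence the
+	 * -ENOENT / -EEXIST comparisons in the assertions in this file.
+	 */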
+
test_lpm_basic();
test_lpm_order();
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 563bbe18c172..4d0650cfb5cd 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -18,7 +18,6 @@
#include <bpf/libbpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"
#define LOCAL_FREE_TARGET (128)
@@ -176,24 +175,20 @@ static void test_lru_sanity0(int map_type, int map_flags)
BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
- errno == EINVAL);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -EINVAL);
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
@@ -201,8 +196,7 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and mark the ref bit to
* stop LRU from removing key=1
@@ -218,8 +212,7 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* lookup elem key=1 and delete it, then check it doesn't exist */
key = 1;
@@ -382,8 +375,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
end_key = 1 + batch_size;
value[0] = 4321;
for (key = 1; key < end_key; key++) {
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
@@ -563,8 +555,7 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));
/* Cannot find the last key because it was removed by LRU */
- assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
}
/* Test map with only one element */
@@ -712,21 +703,18 @@ static void test_lru_sanity7(int map_type, int map_flags)
BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
@@ -734,8 +722,7 @@ static void test_lru_sanity7(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and mark the ref bit to
* stop LRU from removing key=1
@@ -758,8 +745,7 @@ static void test_lru_sanity7(int map_type, int map_flags)
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -806,21 +792,18 @@ static void test_lru_sanity8(int map_type, int map_flags)
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
@@ -830,8 +813,7 @@ static void test_lru_sanity8(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and do _not_ mark ref bit.
* this will be evicted on next update.
@@ -854,8 +836,7 @@ static void test_lru_sanity8(int map_type, int map_flags)
/* key=1 has been removed from the LRU */
key = 1;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -878,6 +859,9 @@ int main(int argc, char **argv)
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index edaffd43da83..6cd6ef9fc20b 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -184,7 +184,7 @@ def bpftool_prog_list(expected=None, ns=""):
def bpftool_map_list(expected=None, ns=""):
_, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
# Remove the base maps
- maps = [m for m in maps if m not in base_maps and m.get('name') not in base_map_names]
+ maps = [m for m in maps if m not in base_maps and m.get('name') and m.get('name') not in base_map_names]
if expected is not None:
if len(maps) != expected:
fail(True, "%d BPF maps loaded, expected %d" %
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 2ecb73a65206..3561c97701f2 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -3,6 +3,7 @@
*/
#define _GNU_SOURCE
#include "test_progs.h"
+#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
@@ -17,6 +18,93 @@
#include <sys/socket.h>
#include <sys/un.h>
+static bool verbose(void)
+{
+ return env.verbosity > VERBOSE_NONE;
+}
+
+static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
+{
+#ifdef __GLIBC__
+ if (verbose() && env.worker_id == -1) {
+ /* nothing to do, output to stdout by default */
+ return;
+ }
+
+ fflush(stdout);
+ fflush(stderr);
+
+ stdout = open_memstream(log_buf, log_cnt);
+ if (!stdout) {
+ stdout = env.stdout;
+ perror("open_memstream");
+ return;
+ }
+
+ if (env.subtest_state)
+ env.subtest_state->stdout = stdout;
+ else
+ env.test_state->stdout = stdout;
+
+ stderr = stdout;
+#endif
+}
+
+static void stdio_hijack(char **log_buf, size_t *log_cnt)
+{
+#ifdef __GLIBC__
+ if (verbose() && env.worker_id == -1) {
+ /* nothing to do, output to stdout by default */
+ return;
+ }
+
+ env.stdout = stdout;
+ env.stderr = stderr;
+
+ stdio_hijack_init(log_buf, log_cnt);
+#endif
+}
+
+static void stdio_restore_cleanup(void)
+{
+#ifdef __GLIBC__
+ if (verbose() && env.worker_id == -1) {
+ /* nothing to do, output to stdout by default */
+ return;
+ }
+
+ fflush(stdout);
+
+ if (env.subtest_state) {
+ fclose(env.subtest_state->stdout);
+ env.subtest_state->stdout = NULL;
+ stdout = env.test_state->stdout;
+ stderr = env.test_state->stdout;
+ } else {
+ fclose(env.test_state->stdout);
+ env.test_state->stdout = NULL;
+ }
+#endif
+}
+
+static void stdio_restore(void)
+{
+#ifdef __GLIBC__
+ if (verbose() && env.worker_id == -1) {
+ /* nothing to do, output to stdout by default */
+ return;
+ }
+
+ if (stdout == env.stdout)
+ return;
+
+ stdio_restore_cleanup();
+
+ stdout = env.stdout;
+ stderr = env.stderr;
+#endif
+}
+
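The hijack/restore helpers above rely on glibc-specific behavior: stdout is an assignable FILE * there, and open_memstream() returns a stream whose contents become visible in *log_buf/*log_cnt on fflush() or fclose(). A self-contained sketch of the same capture pattern (glibc only, hence the #ifdef __GLIBC__ guards above):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *buf = NULL;
        size_t len = 0;
        FILE *saved = stdout;
        FILE *mem = open_memstream(&buf, &len);

        if (!mem)
            return 1;
        stdout = mem;            /* glibc allows reassigning stdout */
        printf("captured line\n");
        fclose(mem);             /* finalizes buf and len */
        stdout = saved;
        printf("got %zu bytes: %s", len, buf);
        free(buf);
        return 0;
    }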
/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
@@ -50,19 +138,8 @@ struct prog_test_def {
int test_num;
void (*run_test)(void);
void (*run_serial_test)(void);
- bool force_log;
- int error_cnt;
- int skip_cnt;
- int sub_succ_cnt;
bool should_run;
- bool tested;
bool need_cgroup_cleanup;
-
- char *subtest_name;
- int subtest_num;
-
- /* store counts before subtest started */
- int old_error_cnt;
};
/* Override C runtime library's usleep() implementation to ensure nanosleep()
@@ -84,12 +161,13 @@ static bool should_run(struct test_selector *sel, int num, const char *name)
int i;
for (i = 0; i < sel->blacklist.cnt; i++) {
- if (glob_match(name, sel->blacklist.strs[i]))
+ if (glob_match(name, sel->blacklist.tests[i].name) &&
+ !sel->blacklist.tests[i].subtest_cnt)
return false;
}
for (i = 0; i < sel->whitelist.cnt; i++) {
- if (glob_match(name, sel->whitelist.strs[i]))
+ if (glob_match(name, sel->whitelist.tests[i].name))
return true;
}
@@ -99,33 +177,138 @@ static bool should_run(struct test_selector *sel, int num, const char *name)
return num < sel->num_set_len && sel->num_set[num];
}
-static void dump_test_log(const struct prog_test_def *test, bool failed)
+static bool should_run_subtest(struct test_selector *sel,
+ struct test_selector *subtest_sel,
+ int subtest_num,
+ const char *test_name,
+ const char *subtest_name)
{
- if (stdout == env.stdout)
- return;
+ int i, j;
- /* worker always holds log */
- if (env.worker_id != -1)
- return;
+ for (i = 0; i < sel->blacklist.cnt; i++) {
+ if (glob_match(test_name, sel->blacklist.tests[i].name)) {
+ if (!sel->blacklist.tests[i].subtest_cnt)
+ return false;
+
+ for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
+ if (glob_match(subtest_name,
+ sel->blacklist.tests[i].subtests[j]))
+ return false;
+ }
+ }
+ }
- fflush(stdout); /* exports env.log_buf & env.log_cnt */
+ for (i = 0; i < sel->whitelist.cnt; i++) {
+ if (glob_match(test_name, sel->whitelist.tests[i].name)) {
+ if (!sel->whitelist.tests[i].subtest_cnt)
+ return true;
- if (env.verbosity > VERBOSE_NONE || test->force_log || failed) {
- if (env.log_cnt) {
- env.log_buf[env.log_cnt] = '\0';
- fprintf(env.stdout, "%s", env.log_buf);
- if (env.log_buf[env.log_cnt - 1] != '\n')
- fprintf(env.stdout, "\n");
+ for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
+ if (glob_match(subtest_name,
+ sel->whitelist.tests[i].subtests[j]))
+ return true;
+ }
}
}
+
+ if (!sel->whitelist.cnt && !subtest_sel->num_set)
+ return true;
+
+ return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
+}
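In terms of the test_filter/test_filter_set structures introduced in test_progs.h further below, a denylist entry with no subtests blocks the whole test, while one with subtests blocks only the matching subtests; the allowlist works analogously, with the numeric set as a fallback. A hedged illustration of what parsing "-t map_kptr/success,map_kptr/fail" might produce, assuming parse_test_list (now in testing_helpers.c, not shown in this hunk) keeps the old behavior of wrapping non-glob names in '*':

    struct test_filter filter = {
        .name = "*map_kptr*",
        .subtests = (char *[]){ "*success*", "*fail*" },
        .subtest_cnt = 2,
    };
    struct test_filter_set allowlist = { .tests = &filter, .cnt = 1 };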
+
+static char *test_result(bool failed, bool skipped)
+{
+ return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
}
-static void skip_account(void)
+static void print_test_log(char *log_buf, size_t log_cnt)
{
- if (env.test->skip_cnt) {
- env.skip_cnt++;
- env.test->skip_cnt = 0;
+ log_buf[log_cnt] = '\0';
+ fprintf(env.stdout, "%s", log_buf);
+ if (log_buf[log_cnt - 1] != '\n')
+ fprintf(env.stdout, "\n");
+}
+
+#define TEST_NUM_WIDTH 7
+
+static void print_test_name(int test_num, const char *test_name, char *result)
+{
+ fprintf(env.stdout, "#%-*d %s", TEST_NUM_WIDTH, test_num, test_name);
+
+ if (result)
+ fprintf(env.stdout, ":%s", result);
+
+ fprintf(env.stdout, "\n");
+}
+
+static void print_subtest_name(int test_num, int subtest_num,
+ const char *test_name, char *subtest_name,
+ char *result)
+{
+ char test_num_str[TEST_NUM_WIDTH + 1];
+
+ snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
+
+ fprintf(env.stdout, "#%-*s %s/%s",
+ TEST_NUM_WIDTH, test_num_str,
+ test_name, subtest_name);
+
+ if (result)
+ fprintf(env.stdout, ":%s", result);
+
+ fprintf(env.stdout, "\n");
+}
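For reference, the two helpers above produce output of the following shape (test names and numbers are made up):

    /*
     *   #42      map_kptr:OK
     *   #42/1    map_kptr/success:OK
     *   #42/2    map_kptr/fail:FAIL
     */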
+
+static void dump_test_log(const struct prog_test_def *test,
+ const struct test_state *test_state,
+ bool skip_ok_subtests,
+ bool par_exec_result)
+{
+ bool test_failed = test_state->error_cnt > 0;
+ bool force_log = test_state->force_log;
+ bool print_test = verbose() || force_log || test_failed;
+ int i;
+ struct subtest_state *subtest_state;
+ bool subtest_failed;
+ bool subtest_filtered;
+ bool print_subtest;
+
+ /* we do not print anything in the worker thread */
+ if (env.worker_id != -1)
+ return;
+
+ /* in verbose, non-parallel mode the log has already been printed
+ * to stdout during the run, so there is nothing left to dump
+ */
+ if (verbose() && !par_exec_result)
+ return;
+
+ if (test_state->log_cnt && print_test)
+ print_test_log(test_state->log_buf, test_state->log_cnt);
+
+ for (i = 0; i < test_state->subtest_num; i++) {
+ subtest_state = &test_state->subtest_states[i];
+ subtest_failed = subtest_state->error_cnt;
+ subtest_filtered = subtest_state->filtered;
+ print_subtest = verbose() || force_log || subtest_failed;
+
+ if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
+ continue;
+
+ if (subtest_state->log_cnt && print_subtest) {
+ print_test_log(subtest_state->log_buf,
+ subtest_state->log_cnt);
+ }
+
+ print_subtest_name(test->test_num, i + 1,
+ test->test_name, subtest_state->name,
+ test_result(subtest_state->error_cnt,
+ subtest_state->skipped));
}
+
+ print_test_name(test->test_num, test->test_name,
+ test_result(test_failed, test_state->skip_cnt));
}
static void stdio_restore(void);
@@ -135,7 +318,6 @@ static void stdio_restore(void);
*/
static void reset_affinity(void)
{
-
cpu_set_t cpuset;
int i, err;
@@ -178,68 +360,100 @@ static void restore_netns(void)
void test__end_subtest(void)
{
struct prog_test_def *test = env.test;
- int sub_error_cnt = test->error_cnt - test->old_error_cnt;
-
- dump_test_log(test, sub_error_cnt);
-
- fprintf(stdout, "#%d/%d %s/%s:%s\n",
- test->test_num, test->subtest_num, test->test_name, test->subtest_name,
- sub_error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
+ struct test_state *test_state = env.test_state;
+ struct subtest_state *subtest_state = env.subtest_state;
+
+ if (subtest_state->error_cnt) {
+ test_state->error_cnt++;
+ } else {
+ if (!subtest_state->skipped)
+ test_state->sub_succ_cnt++;
+ else
+ test_state->skip_cnt++;
+ }
- if (sub_error_cnt)
- test->error_cnt++;
- else if (test->skip_cnt == 0)
- test->sub_succ_cnt++;
- skip_account();
+ if (verbose() && !env.workers)
+ print_subtest_name(test->test_num, test_state->subtest_num,
+ test->test_name, subtest_state->name,
+ test_result(subtest_state->error_cnt,
+ subtest_state->skipped));
- free(test->subtest_name);
- test->subtest_name = NULL;
+ stdio_restore_cleanup();
+ env.subtest_state = NULL;
}
-bool test__start_subtest(const char *name)
+bool test__start_subtest(const char *subtest_name)
{
struct prog_test_def *test = env.test;
+ struct test_state *state = env.test_state;
+ struct subtest_state *subtest_state;
+ size_t sub_state_size = sizeof(*subtest_state);
- if (test->subtest_name)
+ if (env.subtest_state)
test__end_subtest();
- test->subtest_num++;
+ state->subtest_num++;
+ subtest_state = realloc(state->subtest_states,
+ state->subtest_num * sub_state_size);
+ if (!subtest_state) {
+ /* the old subtest_states array stays valid and owned by state */
+ fprintf(stderr, "Not enough memory to allocate subtest result\n");
+ return false;
+ }
+ state->subtest_states = subtest_state;
+
+ subtest_state = &state->subtest_states[state->subtest_num - 1];
- if (!name || !name[0]) {
+ memset(subtest_state, 0, sub_state_size);
+
+ if (!subtest_name || !subtest_name[0]) {
fprintf(env.stderr,
"Subtest #%d didn't provide sub-test name!\n",
- test->subtest_num);
+ state->subtest_num);
return false;
}
- if (!should_run(&env.subtest_selector, test->subtest_num, name))
- return false;
-
- test->subtest_name = strdup(name);
- if (!test->subtest_name) {
+ subtest_state->name = strdup(subtest_name);
+ if (!subtest_state->name) {
fprintf(env.stderr,
"Subtest #%d: failed to copy subtest name!\n",
- test->subtest_num);
+ state->subtest_num);
+ return false;
+ }
+
+ if (!should_run_subtest(&env.test_selector,
+ &env.subtest_selector,
+ state->subtest_num,
+ test->test_name,
+ subtest_name)) {
+ subtest_state->filtered = true;
return false;
}
- env.test->old_error_cnt = env.test->error_cnt;
+
+ env.subtest_state = subtest_state;
+ stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
return true;
}
void test__force_log(void)
{
- env.test->force_log = true;
+ env.test_state->force_log = true;
}
void test__skip(void)
{
- env.test->skip_cnt++;
+ if (env.subtest_state)
+ env.subtest_state->skipped = true;
+ else
+ env.test_state->skip_cnt++;
}
void test__fail(void)
{
- env.test->error_cnt++;
+ if (env.subtest_state)
+ env.subtest_state->error_cnt++;
+ else
+ env.test_state->error_cnt++;
}
int test__join_cgroup(const char *path)
@@ -418,14 +632,14 @@ static void unload_bpf_testmod(void)
fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
if (delete_module("bpf_testmod", 0)) {
if (errno == ENOENT) {
- if (env.verbosity > VERBOSE_NONE)
+ if (verbose())
fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
return;
}
fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
return;
}
- if (env.verbosity > VERBOSE_NONE)
+ if (verbose())
fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
}
@@ -436,7 +650,7 @@ static int load_bpf_testmod(void)
/* ensure previous instance of the module is unloaded */
unload_bpf_testmod();
- if (env.verbosity > VERBOSE_NONE)
+ if (verbose())
fprintf(stdout, "Loading bpf_testmod.ko...\n");
fd = open("bpf_testmod.ko", O_RDONLY);
@@ -451,7 +665,7 @@ static int load_bpf_testmod(void)
}
close(fd);
- if (env.verbosity > VERBOSE_NONE)
+ if (verbose())
fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
return 0;
}
@@ -472,8 +686,11 @@ static struct prog_test_def prog_test_defs[] = {
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};
+
static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
+static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
+
const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] = "BPF selftests test runner";
@@ -527,63 +744,29 @@ static int libbpf_print_fn(enum libbpf_print_level level,
return 0;
}
-static void free_str_set(const struct str_set *set)
+static void free_test_filter_set(const struct test_filter_set *set)
{
- int i;
+ int i, j;
if (!set)
return;
- for (i = 0; i < set->cnt; i++)
- free((void *)set->strs[i]);
- free(set->strs);
-}
-
-static int parse_str_list(const char *s, struct str_set *set, bool is_glob_pattern)
-{
- char *input, *state = NULL, *next, **tmp, **strs = NULL;
- int i, cnt = 0;
+ for (i = 0; i < set->cnt; i++) {
+ free((void *)set->tests[i].name);
+ for (j = 0; j < set->tests[i].subtest_cnt; j++)
+ free((void *)set->tests[i].subtests[j]);
- input = strdup(s);
- if (!input)
- return -ENOMEM;
-
- while ((next = strtok_r(state ? NULL : input, ",", &state))) {
- tmp = realloc(strs, sizeof(*strs) * (cnt + 1));
- if (!tmp)
- goto err;
- strs = tmp;
-
- if (is_glob_pattern) {
- strs[cnt] = strdup(next);
- if (!strs[cnt])
- goto err;
- } else {
- strs[cnt] = malloc(strlen(next) + 2 + 1);
- if (!strs[cnt])
- goto err;
- sprintf(strs[cnt], "*%s*", next);
- }
-
- cnt++;
+ free((void *)set->tests[i].subtests);
}
- tmp = realloc(set->strs, sizeof(*strs) * (cnt + set->cnt));
- if (!tmp)
- goto err;
- memcpy(tmp + set->cnt, strs, sizeof(*strs) * cnt);
- set->strs = (const char **)tmp;
- set->cnt += cnt;
+ free((void *)set->tests);
+}
- free(input);
- free(strs);
- return 0;
-err:
- for (i = 0; i < cnt; i++)
- free(strs[i]);
- free(strs);
- free(input);
- return -ENOMEM;
+static void free_test_selector(struct test_selector *test_selector)
+{
+ free_test_filter_set(&test_selector->blacklist);
+ free_test_filter_set(&test_selector->whitelist);
+ free(test_selector->num_set);
}
extern int extra_prog_load_log_flags;
@@ -615,33 +798,17 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
case ARG_TEST_NAME_GLOB_ALLOWLIST:
case ARG_TEST_NAME: {
- char *subtest_str = strchr(arg, '/');
-
- if (subtest_str) {
- *subtest_str = '\0';
- if (parse_str_list(subtest_str + 1,
- &env->subtest_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
- return -ENOMEM;
- }
- if (parse_str_list(arg, &env->test_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
+ if (parse_test_list(arg,
+ &env->test_selector.whitelist,
+ key == ARG_TEST_NAME_GLOB_ALLOWLIST))
return -ENOMEM;
break;
}
case ARG_TEST_NAME_GLOB_DENYLIST:
case ARG_TEST_NAME_BLACKLIST: {
- char *subtest_str = strchr(arg, '/');
-
- if (subtest_str) {
- *subtest_str = '\0';
- if (parse_str_list(subtest_str + 1,
- &env->subtest_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
- return -ENOMEM;
- }
- if (parse_str_list(arg, &env->test_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
+ if (parse_test_list(arg,
+ &env->test_selector.blacklist,
+ key == ARG_TEST_NAME_GLOB_DENYLIST))
return -ENOMEM;
break;
}
@@ -665,7 +832,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
}
- if (env->verbosity > VERBOSE_NONE) {
+ if (verbose()) {
if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
fprintf(stderr,
"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
@@ -706,44 +873,6 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return 0;
}
-static void stdio_hijack(void)
-{
-#ifdef __GLIBC__
- env.stdout = stdout;
- env.stderr = stderr;
-
- if (env.verbosity > VERBOSE_NONE && env.worker_id == -1) {
- /* nothing to do, output to stdout by default */
- return;
- }
-
- /* stdout and stderr -> buffer */
- fflush(stdout);
-
- stdout = open_memstream(&env.log_buf, &env.log_cnt);
- if (!stdout) {
- stdout = env.stdout;
- perror("open_memstream");
- return;
- }
-
- stderr = stdout;
-#endif
-}
-
-static void stdio_restore(void)
-{
-#ifdef __GLIBC__
- if (stdout == env.stdout)
- return;
-
- fclose(stdout);
-
- stdout = env.stdout;
- stderr = env.stderr;
-#endif
-}
-
/*
* Determine if test_progs is running as a "flavored" test runner and switch
* into corresponding sub-directory to load correct BPF objects.
@@ -761,13 +890,15 @@ int cd_flavor_subdir(const char *exec_name)
const char *flavor = strrchr(exec_name, '/');
if (!flavor)
- return 0;
- flavor++;
+ flavor = exec_name;
+ else
+ flavor++;
+
flavor = strrchr(flavor, '-');
if (!flavor)
return 0;
flavor++;
- if (env.verbosity > VERBOSE_NONE)
+ if (verbose())
fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
return chdir(flavor);
@@ -820,8 +951,10 @@ void crash_handler(int signum)
sz = backtrace(bt, ARRAY_SIZE(bt));
- if (env.test)
- dump_test_log(env.test, true);
+ if (env.test) {
+ env.test_state->error_cnt++;
+ dump_test_log(env.test, env.test_state, true, false);
+ }
if (env.stdout)
stdio_restore();
if (env.worker_id != -1)
@@ -843,28 +976,22 @@ static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;
-struct test_result {
- int error_cnt;
- int skip_cnt;
- int sub_succ_cnt;
-
- size_t log_cnt;
- char *log_buf;
-};
-
-static struct test_result test_results[ARRAY_SIZE(prog_test_defs)];
-
static inline const char *str_msg(const struct msg *msg, char *buf)
{
switch (msg->type) {
case MSG_DO_TEST:
- sprintf(buf, "MSG_DO_TEST %d", msg->do_test.test_num);
+ sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
break;
case MSG_TEST_DONE:
sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
- msg->test_done.test_num,
+ msg->test_done.num,
msg->test_done.have_log);
break;
+ case MSG_SUBTEST_DONE:
+ sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
+ msg->subtest_done.num,
+ msg->subtest_done.have_log);
+ break;
case MSG_TEST_LOG:
sprintf(buf, "MSG_TEST_LOG (cnt: %ld, last: %d)",
strlen(msg->test_log.log_buf),
@@ -907,8 +1034,12 @@ static int recv_message(int sock, struct msg *msg)
static void run_one_test(int test_num)
{
struct prog_test_def *test = &prog_test_defs[test_num];
+ struct test_state *state = &test_states[test_num];
env.test = test;
+ env.test_state = state;
+
+ stdio_hijack(&state->log_buf, &state->log_cnt);
if (test->run_test)
test->run_test();
@@ -916,17 +1047,23 @@ static void run_one_test(int test_num)
test->run_serial_test();
/* ensure last sub-test is finalized properly */
- if (test->subtest_name)
+ if (env.subtest_state)
test__end_subtest();
- test->tested = true;
+ state->tested = true;
- dump_test_log(test, test->error_cnt);
+ if (verbose() && env.worker_id == -1)
+ print_test_name(test_num + 1, test->test_name,
+ test_result(state->error_cnt, state->skip_cnt));
reset_affinity();
restore_netns();
if (test->need_cgroup_cleanup)
cleanup_cgroup_environment();
+
+ stdio_restore();
+
+ dump_test_log(test, state, false, false);
}
struct dispatch_data {
@@ -934,18 +1071,90 @@ struct dispatch_data {
int sock_fd;
};
+static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
+{
+ if (recv_message(sock_fd, msg) < 0)
+ return 1;
+
+ if (msg->type != type) {
+ printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
+{
+ FILE *log_fp = NULL;
+ int result = 0;
+
+ log_fp = open_memstream(log_buf, log_cnt);
+ if (!log_fp)
+ return 1;
+
+ while (true) {
+ struct msg msg;
+
+ if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
+ result = 1;
+ goto out;
+ }
+
+ fprintf(log_fp, "%s", msg.test_log.log_buf);
+ if (msg.test_log.is_last)
+ break;
+ }
+
+out:
+ fclose(log_fp);
+ log_fp = NULL;
+ return result;
+}
+
+static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
+{
+ struct msg msg;
+ struct subtest_state *subtest_state;
+ int subtest_num = state->subtest_num;
+
+ state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
+ if (!state->subtest_states)
+ return 1;
+
+ for (int i = 0; i < subtest_num; i++) {
+ subtest_state = &state->subtest_states[i];
+
+ memset(subtest_state, 0, sizeof(*subtest_state));
+
+ if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
+ return 1;
+
+ subtest_state->name = strdup(msg.subtest_done.name);
+ subtest_state->error_cnt = msg.subtest_done.error_cnt;
+ subtest_state->skipped = msg.subtest_done.skipped;
+ subtest_state->filtered = msg.subtest_done.filtered;
+
+ /* collect all logs */
+ if (msg.subtest_done.have_log)
+ if (dispatch_thread_read_log(sock_fd,
+ &subtest_state->log_buf,
+ &subtest_state->log_cnt))
+ return 1;
+ }
+
+ return 0;
+}
+
static void *dispatch_thread(void *ctx)
{
struct dispatch_data *data = ctx;
int sock_fd;
- FILE *log_fp = NULL;
sock_fd = data->sock_fd;
while (true) {
int test_to_run = -1;
struct prog_test_def *test;
- struct test_result *result;
+ struct test_state *state;
/* grab a test */
{
@@ -970,8 +1179,9 @@ static void *dispatch_thread(void *ctx)
{
struct msg msg_do_test;
+ memset(&msg_do_test, 0, sizeof(msg_do_test));
msg_do_test.type = MSG_DO_TEST;
- msg_do_test.do_test.test_num = test_to_run;
+ msg_do_test.do_test.num = test_to_run;
if (send_message(sock_fd, &msg_do_test) < 0) {
perror("Fail to send command");
goto done;
@@ -980,72 +1190,45 @@ static void *dispatch_thread(void *ctx)
}
/* wait for test done */
- {
- int err;
- struct msg msg_test_done;
+ do {
+ struct msg msg;
- err = recv_message(sock_fd, &msg_test_done);
- if (err < 0)
- goto error;
- if (msg_test_done.type != MSG_TEST_DONE)
+ if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
goto error;
- if (test_to_run != msg_test_done.test_done.test_num)
+ if (test_to_run != msg.test_done.num)
goto error;
- test->tested = true;
- result = &test_results[test_to_run];
-
- result->error_cnt = msg_test_done.test_done.error_cnt;
- result->skip_cnt = msg_test_done.test_done.skip_cnt;
- result->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt;
+ state = &test_states[test_to_run];
+ state->tested = true;
+ state->error_cnt = msg.test_done.error_cnt;
+ state->skip_cnt = msg.test_done.skip_cnt;
+ state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
+ state->subtest_num = msg.test_done.subtest_num;
/* collect all logs */
- if (msg_test_done.test_done.have_log) {
- log_fp = open_memstream(&result->log_buf, &result->log_cnt);
- if (!log_fp)
+ if (msg.test_done.have_log) {
+ if (dispatch_thread_read_log(sock_fd,
+ &state->log_buf,
+ &state->log_cnt))
goto error;
+ }
- while (true) {
- struct msg msg_log;
-
- if (recv_message(sock_fd, &msg_log) < 0)
- goto error;
- if (msg_log.type != MSG_TEST_LOG)
- goto error;
+ /* collect all subtests and subtest logs */
+ if (!state->subtest_num)
+ break;
- fprintf(log_fp, "%s", msg_log.test_log.log_buf);
- if (msg_log.test_log.is_last)
- break;
- }
- fclose(log_fp);
- log_fp = NULL;
- }
- /* output log */
- {
- pthread_mutex_lock(&stdout_output_lock);
-
- if (result->log_cnt) {
- result->log_buf[result->log_cnt] = '\0';
- fprintf(stdout, "%s", result->log_buf);
- if (result->log_buf[result->log_cnt - 1] != '\n')
- fprintf(stdout, "\n");
- }
-
- fprintf(stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK"));
-
- pthread_mutex_unlock(&stdout_output_lock);
- }
+ if (dispatch_thread_send_subtests(sock_fd, state))
+ goto error;
+ } while (false);
- } /* wait for test done */
+ pthread_mutex_lock(&stdout_output_lock);
+ dump_test_log(test, state, false, true);
+ pthread_mutex_unlock(&stdout_output_lock);
} /* while (true) */
error:
if (env.debug)
fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
- if (log_fp)
- fclose(log_fp);
done:
{
struct msg msg_exit;
@@ -1060,38 +1243,56 @@ done:
return NULL;
}
-static void print_all_error_logs(void)
+static void calculate_summary_and_print_errors(struct test_env *env)
{
int i;
+ int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
- if (env.fail_cnt)
- fprintf(stdout, "\nAll error logs:\n");
-
- /* print error logs again */
for (i = 0; i < prog_test_cnt; i++) {
- struct prog_test_def *test;
- struct test_result *result;
-
- test = &prog_test_defs[i];
- result = &test_results[i];
+ struct test_state *state = &test_states[i];
- if (!test->tested || !result->error_cnt)
+ if (!state->tested)
continue;
- fprintf(stdout, "\n#%d %s:%s\n",
- test->test_num, test->test_name,
- result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK"));
+ sub_succ_cnt += state->sub_succ_cnt;
+ skip_cnt += state->skip_cnt;
+
+ if (state->error_cnt)
+ fail_cnt++;
+ else
+ succ_cnt++;
+ }
- if (result->log_cnt) {
- result->log_buf[result->log_cnt] = '\0';
- fprintf(stdout, "%s", result->log_buf);
- if (result->log_buf[result->log_cnt - 1] != '\n')
- fprintf(stdout, "\n");
+ /*
+ * We only print the error log summary when there are failed tests and
+ * verbose mode is not enabled. Otherwise, results may be inconsistent.
+ */
+ if (!verbose() && fail_cnt) {
+ printf("\nAll error logs:\n");
+
+ /* print error logs again */
+ for (i = 0; i < prog_test_cnt; i++) {
+ struct prog_test_def *test = &prog_test_defs[i];
+ struct test_state *state = &test_states[i];
+
+ if (!state->tested || !state->error_cnt)
+ continue;
+
+ dump_test_log(test, state, true, true);
}
}
+
+ printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
+ succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
+
+ env->succ_cnt = succ_cnt;
+ env->sub_succ_cnt = sub_succ_cnt;
+ env->fail_cnt = fail_cnt;
+ env->skip_cnt = skip_cnt;
}
-static int server_main(void)
+static void server_main(void)
{
pthread_t *dispatcher_threads;
struct dispatch_data *data;
@@ -1147,60 +1348,18 @@ static int server_main(void)
for (int i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
- struct test_result *result = &test_results[i];
if (!test->should_run || !test->run_serial_test)
continue;
- stdio_hijack();
-
run_one_test(i);
-
- stdio_restore();
- if (env.log_buf) {
- result->log_cnt = env.log_cnt;
- result->log_buf = strdup(env.log_buf);
-
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
- }
- restore_netns();
-
- fprintf(stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
-
- result->error_cnt = test->error_cnt;
- result->skip_cnt = test->skip_cnt;
- result->sub_succ_cnt = test->sub_succ_cnt;
}
/* generate summary */
fflush(stderr);
fflush(stdout);
- for (i = 0; i < prog_test_cnt; i++) {
- struct prog_test_def *current_test;
- struct test_result *result;
-
- current_test = &prog_test_defs[i];
- result = &test_results[i];
-
- if (!current_test->tested)
- continue;
-
- env.succ_cnt += result->error_cnt ? 0 : 1;
- env.skip_cnt += result->skip_cnt;
- if (result->error_cnt)
- env.fail_cnt++;
- env.sub_succ_cnt += result->sub_succ_cnt;
- }
-
- print_all_error_logs();
-
- fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ calculate_summary_and_print_errors(&env);
/* reap all workers */
for (i = 0; i < env.workers; i++) {
@@ -1210,8 +1369,91 @@ static int server_main(void)
if (pid != env.worker_pids[i])
perror("Unable to reap worker");
}
+}
- return 0;
+static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
+{
+ char *src;
+ size_t slen;
+
+ src = log_buf;
+ slen = log_cnt;
+ while (slen) {
+ struct msg msg_log;
+ char *dest;
+ size_t len;
+
+ memset(&msg_log, 0, sizeof(msg_log));
+ msg_log.type = MSG_TEST_LOG;
+ dest = msg_log.test_log.log_buf;
+ len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
+ memcpy(dest, src, len);
+
+ src += len;
+ slen -= len;
+ if (!slen)
+ msg_log.test_log.is_last = true;
+
+ assert(send_message(sock, &msg_log) >= 0);
+ }
+}
+
+static void free_subtest_state(struct subtest_state *state)
+{
+ if (state->log_buf) {
+ free(state->log_buf);
+ state->log_buf = NULL;
+ state->log_cnt = 0;
+ }
+ free(state->name);
+ state->name = NULL;
+}
+
+static int worker_main_send_subtests(int sock, struct test_state *state)
+{
+ int i, result = 0;
+ struct msg msg;
+ struct subtest_state *subtest_state;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_SUBTEST_DONE;
+
+ for (i = 0; i < state->subtest_num; i++) {
+ subtest_state = &state->subtest_states[i];
+
+ msg.subtest_done.num = i;
+
+ strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
+
+ msg.subtest_done.error_cnt = subtest_state->error_cnt;
+ msg.subtest_done.skipped = subtest_state->skipped;
+ msg.subtest_done.filtered = subtest_state->filtered;
+ msg.subtest_done.have_log = false;
+
+ if (verbose() || state->force_log || subtest_state->error_cnt) {
+ if (subtest_state->log_cnt)
+ msg.subtest_done.have_log = true;
+ }
+
+ if (send_message(sock, &msg) < 0) {
+ perror("Fail to send message done");
+ result = 1;
+ goto out;
+ }
+
+ /* send logs */
+ if (msg.subtest_done.have_log)
+ worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);
+
+ free_subtest_state(subtest_state); /* frees log buffer and name */
+ }
+
+out:
+ for (; i < state->subtest_num; i++)
+ free_subtest_state(&state->subtest_states[i]);
+ free(state->subtest_states);
+ return result;
}
static int worker_main(int sock)
@@ -1232,12 +1474,10 @@ static int worker_main(int sock)
env.worker_id);
goto out;
case MSG_DO_TEST: {
- int test_to_run;
- struct prog_test_def *test;
- struct msg msg_done;
-
- test_to_run = msg.do_test.test_num;
- test = &prog_test_defs[test_to_run];
+ int test_to_run = msg.do_test.num;
+ struct prog_test_def *test = &prog_test_defs[test_to_run];
+ struct test_state *state = &test_states[test_to_run];
+ struct msg msg;
if (env.debug)
fprintf(stderr, "[%d]: #%d:%s running.\n",
@@ -1245,60 +1485,40 @@ static int worker_main(int sock)
test_to_run + 1,
test->test_name);
- stdio_hijack();
-
run_one_test(test_to_run);
- stdio_restore();
-
- memset(&msg_done, 0, sizeof(msg_done));
- msg_done.type = MSG_TEST_DONE;
- msg_done.test_done.test_num = test_to_run;
- msg_done.test_done.error_cnt = test->error_cnt;
- msg_done.test_done.skip_cnt = test->skip_cnt;
- msg_done.test_done.sub_succ_cnt = test->sub_succ_cnt;
- msg_done.test_done.have_log = false;
-
- if (env.verbosity > VERBOSE_NONE || test->force_log || test->error_cnt) {
- if (env.log_cnt)
- msg_done.test_done.have_log = true;
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_TEST_DONE;
+ msg.test_done.num = test_to_run;
+ msg.test_done.error_cnt = state->error_cnt;
+ msg.test_done.skip_cnt = state->skip_cnt;
+ msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
+ msg.test_done.subtest_num = state->subtest_num;
+ msg.test_done.have_log = false;
+
+ if (verbose() || state->force_log || state->error_cnt) {
+ if (state->log_cnt)
+ msg.test_done.have_log = true;
}
- if (send_message(sock, &msg_done) < 0) {
+ if (send_message(sock, &msg) < 0) {
perror("Fail to send message done");
goto out;
}
/* send logs */
- if (msg_done.test_done.have_log) {
- char *src;
- size_t slen;
-
- src = env.log_buf;
- slen = env.log_cnt;
- while (slen) {
- struct msg msg_log;
- char *dest;
- size_t len;
-
- memset(&msg_log, 0, sizeof(msg_log));
- msg_log.type = MSG_TEST_LOG;
- dest = msg_log.test_log.log_buf;
- len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
- memcpy(dest, src, len);
-
- src += len;
- slen -= len;
- if (!slen)
- msg_log.test_log.is_last = true;
-
- assert(send_message(sock, &msg_log) >= 0);
- }
- }
- if (env.log_buf) {
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
+ if (msg.test_done.have_log)
+ worker_main_send_log(sock, state->log_buf, state->log_cnt);
+
+ if (state->log_buf) {
+ free(state->log_buf);
+ state->log_buf = NULL;
+ state->log_cnt = 0;
}
+
+ if (state->subtest_num)
+ if (worker_main_send_subtests(sock, state))
+ goto out;
+
if (env.debug)
fprintf(stderr, "[%d]: #%d:%s done.\n",
env.worker_id,
@@ -1316,6 +1536,23 @@ out:
return 0;
}
+static void free_test_states(void)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
+ struct test_state *test_state = &test_states[i];
+
+ for (j = 0; j < test_state->subtest_num; j++)
+ free_subtest_state(&test_state->subtest_states[j]);
+
+ free(test_state->subtest_states);
+ free(test_state->log_buf);
+ test_state->subtest_states = NULL;
+ test_state->log_buf = NULL;
+ }
+}
+
int main(int argc, char **argv)
{
static const struct argp argp = {
@@ -1367,11 +1604,8 @@ int main(int argc, char **argv)
struct prog_test_def *test = &prog_test_defs[i];
test->test_num = i + 1;
- if (should_run(&env.test_selector,
- test->test_num, test->test_name))
- test->should_run = true;
- else
- test->should_run = false;
+ test->should_run = should_run(&env.test_selector,
+ test->test_num, test->test_name);
if ((test->run_test == NULL && test->run_serial_test == NULL) ||
(test->run_test != NULL && test->run_serial_test != NULL)) {
@@ -1428,7 +1662,6 @@ int main(int argc, char **argv)
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
- struct test_result *result;
if (!test->should_run)
continue;
@@ -1444,34 +1677,7 @@ int main(int argc, char **argv)
continue;
}
- stdio_hijack();
-
run_one_test(i);
-
- stdio_restore();
-
- fprintf(env.stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
-
- result = &test_results[i];
- result->error_cnt = test->error_cnt;
- if (env.log_buf) {
- result->log_buf = strdup(env.log_buf);
- result->log_cnt = env.log_cnt;
-
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
- }
-
- if (test->error_cnt)
- env.fail_cnt++;
- else
- env.succ_cnt++;
-
- skip_account();
- env.sub_succ_cnt += test->sub_succ_cnt;
}
if (env.get_test_cnt) {
@@ -1482,21 +1688,16 @@ int main(int argc, char **argv)
if (env.list_test_names)
goto out;
- print_all_error_logs();
-
- fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ calculate_summary_and_print_errors(&env);
close(env.saved_netns_fd);
out:
if (!env.list_test_names && env.has_testmod)
unload_bpf_testmod();
- free_str_set(&env.test_selector.blacklist);
- free_str_set(&env.test_selector.whitelist);
- free(env.test_selector.num_set);
- free_str_set(&env.subtest_selector.blacklist);
- free_str_set(&env.subtest_selector.whitelist);
- free(env.subtest_selector.num_set);
+
+ free_test_selector(&env.test_selector);
+ free_test_selector(&env.subtest_selector);
+ free_test_states();
if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
return EXIT_NO_TEST;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 93c1ff705533..5fe1365c2bb1 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -25,6 +25,7 @@ typedef __u16 __sum16;
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/param.h>
#include <fcntl.h>
#include <pthread.h>
#include <linux/bpf.h>
@@ -37,7 +38,6 @@ typedef __u16 __sum16;
#include <bpf/bpf_endian.h>
#include "trace_helpers.h"
#include "testing_helpers.h"
-#include "flow_dissector_load.h"
enum verbosity {
VERBOSE_NONE,
@@ -46,18 +46,52 @@ enum verbosity {
VERBOSE_SUPER,
};
-struct str_set {
- const char **strs;
+struct test_filter {
+ char *name;
+ char **subtests;
+ int subtest_cnt;
+};
+
+struct test_filter_set {
+ struct test_filter *tests;
int cnt;
};
struct test_selector {
- struct str_set whitelist;
- struct str_set blacklist;
+ struct test_filter_set whitelist;
+ struct test_filter_set blacklist;
bool *num_set;
int num_set_len;
};
+struct subtest_state {
+ char *name;
+ size_t log_cnt;
+ char *log_buf;
+ int error_cnt;
+ bool skipped;
+ bool filtered;
+
+ FILE *stdout;
+};
+
+struct test_state {
+ bool tested;
+ bool force_log;
+
+ int error_cnt;
+ int skip_cnt;
+ int sub_succ_cnt;
+
+ struct subtest_state *subtest_states;
+ int subtest_num;
+
+ size_t log_cnt;
+ char *log_buf;
+
+ FILE *stdout;
+};
+
struct test_env {
struct test_selector test_selector;
struct test_selector subtest_selector;
@@ -70,12 +104,12 @@ struct test_env {
bool get_test_cnt;
bool list_test_names;
- struct prog_test_def *test; /* current running tests */
+ struct prog_test_def *test; /* current running test */
+ struct test_state *test_state; /* current running test state */
+ struct subtest_state *subtest_state; /* current running subtest state */
FILE *stdout;
FILE *stderr;
- char *log_buf;
- size_t log_cnt;
int nr_cpus;
int succ_cnt; /* successful tests */
@@ -92,39 +126,51 @@ struct test_env {
};
#define MAX_LOG_TRUNK_SIZE 8192
+#define MAX_SUBTEST_NAME 1024
enum msg_type {
MSG_DO_TEST = 0,
MSG_TEST_DONE = 1,
MSG_TEST_LOG = 2,
+ MSG_SUBTEST_DONE = 3,
MSG_EXIT = 255,
};
struct msg {
enum msg_type type;
union {
struct {
- int test_num;
+ int num;
} do_test;
struct {
- int test_num;
+ int num;
int sub_succ_cnt;
int error_cnt;
int skip_cnt;
bool have_log;
+ int subtest_num;
} test_done;
struct {
char log_buf[MAX_LOG_TRUNK_SIZE + 1];
bool is_last;
} test_log;
+ struct {
+ int num;
+ char name[MAX_SUBTEST_NAME + 1];
+ int error_cnt;
+ bool skipped;
+ bool filtered;
+ bool have_log;
+ } subtest_done;
};
};
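Putting the message types together, one test exchange between the dispatcher thread and a worker proceeds as sketched below (a C comment; the arrows are illustrative, the field names are the ones defined above):

    /*
     * dispatcher -> worker: MSG_DO_TEST      { num }
     * worker -> dispatcher: MSG_TEST_DONE    { num, error_cnt, skip_cnt,
     *                                          sub_succ_cnt, subtest_num,
     *                                          have_log }
     * worker -> dispatcher: MSG_TEST_LOG ... (only if have_log; chunks of
     *                                         up to MAX_LOG_TRUNK_SIZE,
     *                                         is_last ends the stream)
     * then, for each of the subtest_num subtests:
     * worker -> dispatcher: MSG_SUBTEST_DONE { num, name, error_cnt,
     *                                          skipped, filtered, have_log }
     * worker -> dispatcher: MSG_TEST_LOG ... (per-subtest log, if any)
     */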
extern struct test_env env;
-extern void test__force_log();
-extern bool test__start_subtest(const char *name);
-extern void test__skip(void);
-extern void test__fail(void);
-extern int test__join_cgroup(const char *path);
+void test__force_log(void);
+bool test__start_subtest(const char *name);
+void test__end_subtest(void);
+void test__skip(void);
+void test__fail(void);
+int test__join_cgroup(const char *path);
#define PRINT_FAIL(format...) \
({ \
@@ -267,6 +313,17 @@ extern int test__join_cgroup(const char *path);
___ok; \
})
+#define ASSERT_HAS_SUBSTR(str, substr, name) ({ \
+ static int duration = 0; \
+ const char *___str = str; \
+ const char *___substr = substr; \
+ bool ___ok = strstr(___str, ___substr) != NULL; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: '%s' is not a substring of '%s'\n", \
+ (name), ___substr, ___str); \
+ ___ok; \
+})
+
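A minimal usage sketch for the new macro (a hypothetical test function; the buffer contents are made up):

    void test_example_substr(void)
    {
        char log_buf[] = "libbpf: prog 'foo': BPF program load failed";

        /* passes if "load failed" occurs in log_buf; otherwise fails
         * the test and prints both strings
         */
        ASSERT_HAS_SUBSTR(log_buf, "load failed", "log_check");
    }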
#define ASSERT_OK(res, name) ({ \
static int duration = 0; \
long long ___res = (res); \
@@ -332,6 +389,8 @@ int trigger_module_test_write(int write_sz);
#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
#elif defined(__s390x__)
#define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
+#elif defined(__aarch64__)
+#define SYS_NANOSLEEP_KPROBE_NAME "__arm64_sys_nanosleep"
#else
#define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
#endif
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index 4a64306728ab..3256de30f563 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -15,7 +15,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
#define CGROUP_PATH "/skb_cgroup_test"
@@ -160,6 +159,9 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
cgfd = cgroup_setup_and_join(CGROUP_PATH);
if (cgfd < 0)
goto err;
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fe10f8134278..810c3740b2cc 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -14,7 +14,6 @@
#include "cgroup_helpers.h"
#include <bpf/bpf_endian.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#define CG_PATH "/foo"
@@ -493,7 +492,7 @@ static int run_test_case(int cgfd, const struct sock_test *test)
goto err;
}
- if (attach_sock_prog(cgfd, progfd, test->attach_type) == -1) {
+ if (attach_sock_prog(cgfd, progfd, test->attach_type) < 0) {
if (test->result == ATTACH_REJECT)
goto out;
else
@@ -541,6 +540,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index f3d5d7ac6505..458564fcfc82 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -19,7 +19,6 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#ifndef ENOTSUPP
@@ -1418,6 +1417,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index dfb4f5c0fcb9..0fbaccdc8861 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -18,7 +18,6 @@
#include <sched.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sendfile.h>
@@ -37,7 +36,6 @@
#include <bpf/libbpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
int running;
@@ -2017,6 +2015,9 @@ int main(int argc, char **argv)
cg_created = 1;
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (test == SELFTESTS) {
err = test_selftest(cg_fd, &options);
goto out;
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index 4f6cf833b522..57620e7c9048 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -14,7 +14,6 @@
#include <bpf/libbpf.h>
#include <bpf/bpf_endian.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
@@ -1561,7 +1560,7 @@ static int run_test_case(int cgfd, struct sysctl_test *test)
goto err;
}
- if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) == -1) {
+ if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) < 0) {
if (test->result == ATTACH_REJECT)
goto out;
else
@@ -1618,6 +1617,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
index 0851c42ee31c..5546b05a0486 100644
--- a/tools/testing/selftests/bpf/test_tag.c
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -20,7 +20,6 @@
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
-#include "bpf_rlimit.h"
#include "testing_helpers.h"
static struct bpf_insn prog[BPF_MAXINSNS];
@@ -189,6 +188,9 @@ int main(void)
uint32_t tests = 0;
int i, fd_map;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
sizeof(int), 1, &opts);
assert(fd_map > 0);
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
index e7775d3bbe08..5c8ef062f760 100644
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
@@ -15,7 +15,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
static int start_server(const struct sockaddr *addr, socklen_t len, bool dual)
@@ -235,6 +234,9 @@ int main(int argc, char **argv)
exit(1);
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
results = get_map_fd_by_prog_id(atoi(argv[1]), &xdp);
if (results < 0) {
log_err("Can't get map");
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4c5114765b23..8284db8b0f13 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -19,7 +19,6 @@
#include <linux/perf_event.h>
#include <linux/err.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
index 2817d9948d59..e9ebc67d73f7 100755
--- a/tools/testing/selftests/bpf/test_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tunnel.sh
@@ -45,6 +45,7 @@
# 5) Tunnel protocol handler, ex: vxlan_rcv, decap the packet
# 6) Forward the packet to the overlay tnl dev
+BPF_PIN_TUNNEL_DIR="/sys/fs/bpf/tc/tunnel"
PING_ARG="-c 3 -w 10 -q"
ret=0
GREEN='\033[0;92m'
@@ -155,52 +156,6 @@ add_ip6erspan_tunnel()
ip link set dev $DEV up
}
-add_vxlan_tunnel()
-{
- # Set static ARP entry here because iptables set-mark works
- # on L3 packet, as a result not applying to ARP packets,
- # causing errors at get_tunnel_{key/opt}.
-
- # at_ns0 namespace
- ip netns exec at_ns0 \
- ip link add dev $DEV_NS type $TYPE \
- id 2 dstport 4789 gbp remote 172.16.1.200
- ip netns exec at_ns0 \
- ip link set dev $DEV_NS address 52:54:00:d9:01:00 up
- ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
- ip netns exec at_ns0 \
- ip neigh add 10.1.1.200 lladdr 52:54:00:d9:02:00 dev $DEV_NS
- ip netns exec at_ns0 iptables -A OUTPUT -j MARK --set-mark 0x800FF
-
- # root namespace
- ip link add dev $DEV type $TYPE external gbp dstport 4789
- ip link set dev $DEV address 52:54:00:d9:02:00 up
- ip addr add dev $DEV 10.1.1.200/24
- ip neigh add 10.1.1.100 lladdr 52:54:00:d9:01:00 dev $DEV
-}
-
-add_ip6vxlan_tunnel()
-{
- #ip netns exec at_ns0 ip -4 addr del 172.16.1.100 dev veth0
- ip netns exec at_ns0 ip -6 addr add ::11/96 dev veth0
- ip netns exec at_ns0 ip link set dev veth0 up
- #ip -4 addr del 172.16.1.200 dev veth1
- ip -6 addr add dev veth1 ::22/96
- ip link set dev veth1 up
-
- # at_ns0 namespace
- ip netns exec at_ns0 \
- ip link add dev $DEV_NS type $TYPE id 22 dstport 4789 \
- local ::11 remote ::22
- ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
- ip netns exec at_ns0 ip link set dev $DEV_NS up
-
- # root namespace
- ip link add dev $DEV type $TYPE external dstport 4789
- ip addr add dev $DEV 10.1.1.200/24
- ip link set dev $DEV up
-}
-
add_geneve_tunnel()
{
# at_ns0 namespace
@@ -403,58 +358,6 @@ test_ip6erspan()
echo -e ${GREEN}"PASS: $TYPE"${NC}
}
-test_vxlan()
-{
- TYPE=vxlan
- DEV_NS=vxlan00
- DEV=vxlan11
- ret=0
-
- check $TYPE
- config_device
- add_vxlan_tunnel
- attach_bpf $DEV vxlan_set_tunnel vxlan_get_tunnel
- ping $PING_ARG 10.1.1.100
- check_err $?
- ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
- check_err $?
- cleanup
-
- if [ $ret -ne 0 ]; then
- echo -e ${RED}"FAIL: $TYPE"${NC}
- return 1
- fi
- echo -e ${GREEN}"PASS: $TYPE"${NC}
-}
-
-test_ip6vxlan()
-{
- TYPE=vxlan
- DEV_NS=ip6vxlan00
- DEV=ip6vxlan11
- ret=0
-
- check $TYPE
- config_device
- add_ip6vxlan_tunnel
- ip link set dev veth1 mtu 1500
- attach_bpf $DEV ip6vxlan_set_tunnel ip6vxlan_get_tunnel
- # underlay
- ping6 $PING_ARG ::11
- # ip4 over ip6
- ping $PING_ARG 10.1.1.100
- check_err $?
- ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
- check_err $?
- cleanup
-
- if [ $ret -ne 0 ]; then
- echo -e ${RED}"FAIL: ip6$TYPE"${NC}
- return 1
- fi
- echo -e ${GREEN}"PASS: ip6$TYPE"${NC}
-}
-
test_geneve()
{
TYPE=geneve
@@ -641,9 +544,11 @@ test_xfrm_tunnel()
config_device
> /sys/kernel/debug/tracing/trace
setup_xfrm_tunnel
+ mkdir -p ${BPF_PIN_TUNNEL_DIR}
+ bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}
tc qdisc add dev veth1 clsact
- tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
- sec xfrm_get_state
+ tc filter add dev veth1 proto ip ingress bpf da object-pinned \
+ ${BPF_PIN_TUNNEL_DIR}/xfrm_get_state
ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
sleep 1
grep "reqid 1" /sys/kernel/debug/tracing/trace
@@ -666,13 +571,17 @@ attach_bpf()
DEV=$1
SET=$2
GET=$3
+ mkdir -p ${BPF_PIN_TUNNEL_DIR}
+ bpftool prog loadall ./test_tunnel_kern.o ${BPF_PIN_TUNNEL_DIR}/
tc qdisc add dev $DEV clsact
- tc filter add dev $DEV egress bpf da obj test_tunnel_kern.o sec $SET
- tc filter add dev $DEV ingress bpf da obj test_tunnel_kern.o sec $GET
+ tc filter add dev $DEV egress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$SET
+ tc filter add dev $DEV ingress bpf da object-pinned ${BPF_PIN_TUNNEL_DIR}/$GET
}
cleanup()
{
+ rm -rf ${BPF_PIN_TUNNEL_DIR}
+
ip netns delete at_ns0 2> /dev/null
ip link del veth1 2> /dev/null
ip link del ipip11 2> /dev/null
@@ -681,8 +590,6 @@ cleanup()
ip link del gretap11 2> /dev/null
ip link del ip6gre11 2> /dev/null
ip link del ip6gretap11 2> /dev/null
- ip link del vxlan11 2> /dev/null
- ip link del ip6vxlan11 2> /dev/null
ip link del geneve11 2> /dev/null
ip link del ip6geneve11 2> /dev/null
ip link del erspan11 2> /dev/null
@@ -714,7 +621,6 @@ enable_debug()
{
echo 'file ip_gre.c +p' > /sys/kernel/debug/dynamic_debug/control
echo 'file ip6_gre.c +p' > /sys/kernel/debug/dynamic_debug/control
- echo 'file vxlan.c +p' > /sys/kernel/debug/dynamic_debug/control
echo 'file geneve.c +p' > /sys/kernel/debug/dynamic_debug/control
echo 'file ipip.c +p' > /sys/kernel/debug/dynamic_debug/control
}
@@ -750,14 +656,6 @@ bpf_tunnel_test()
test_ip6erspan v2
errors=$(( $errors + $? ))
- echo "Testing VXLAN tunnel..."
- test_vxlan
- errors=$(( $errors + $? ))
-
- echo "Testing IP6VXLAN tunnel..."
- test_ip6vxlan
- errors=$(( $errors + $? ))
-
echo "Testing GENEVE tunnel..."
test_geneve
errors=$(( $errors + $? ))
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index a2cd236c32eb..f9d553fbf68a 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -51,12 +51,24 @@
#endif
#define MAX_INSNS BPF_MAXINSNS
+#define MAX_EXPECTED_INSNS 32
+#define MAX_UNEXPECTED_INSNS 32
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 22
+#define MAX_NR_MAPS 23
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
+#define MAX_FUNC_INFOS 8
+#define MAX_BTF_STRINGS 256
+#define MAX_BTF_TYPES 256
+
+#define INSN_OFF_MASK ((__s16)0xFFFF)
+#define INSN_IMM_MASK ((__s32)0xFFFFFFFF)
+#define SKIP_INSNS() BPF_RAW_INSN(0xde, 0xa, 0xd, 0xbeef, 0xdeadbeef)
+
+#define DEFAULT_LIBBPF_LOG_LEVEL 4
+#define VERBOSE_LIBBPF_LOG_LEVEL 1
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
@@ -79,6 +91,23 @@ struct bpf_test {
const char *descr;
struct bpf_insn insns[MAX_INSNS];
struct bpf_insn *fill_insns;
+ /* If specified, the test engine looks for this sequence of
+ * instructions in the BPF program after loading, which allows
+ * testing rewrites applied by the verifier. Use the values
+ * INSN_OFF_MASK and INSN_IMM_MASK to mask the `off` and `imm`
+ * fields if their content does not matter. The test case fails
+ * if the specified instructions are not found.
+ *
+ * The sequence can be split into sub-sequences by adding a
+ * SKIP_INSNS instruction at the end of each sub-sequence. In
+ * that case the sub-sequences are searched for one after another.
+ */
+ struct bpf_insn expected_insns[MAX_EXPECTED_INSNS];
+ /* If specified, the test engine applies the same pattern-matching
+ * logic as for `expected_insns`. If the specified pattern is
+ * matched, the test case is marked as failed.
+ */
+ struct bpf_insn unexpected_insns[MAX_UNEXPECTED_INSNS];
int fixup_map_hash_8b[MAX_FIXUPS];
int fixup_map_hash_48b[MAX_FIXUPS];
int fixup_map_hash_16b[MAX_FIXUPS];
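A hedged sketch of how a test case might fill the new fields (a hypothetical fragment, not a test from this patch): require a call with off/imm ignored, skip ahead, require an exit, and reject any program still containing a direct bpf_loop call:

    .expected_insns = {
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, INSN_OFF_MASK, INSN_IMM_MASK),
        SKIP_INSNS(),
        BPF_EXIT_INSN(),
    },
    .unexpected_insns = {
        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
    },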
@@ -101,6 +130,7 @@ struct bpf_test {
int fixup_map_reuseport_array[MAX_FIXUPS];
int fixup_map_ringbuf[MAX_FIXUPS];
int fixup_map_timer[MAX_FIXUPS];
+ int fixup_map_kptr[MAX_FIXUPS];
struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
* Can be a tab-separated sequence of expected strings. An empty string
@@ -134,6 +164,14 @@ struct bpf_test {
};
enum bpf_attach_type expected_attach_type;
const char *kfunc;
+ struct bpf_func_info func_info[MAX_FUNC_INFOS];
+ int func_info_cnt;
+ char btf_strings[MAX_BTF_STRINGS];
+ /* A set of BTF types to load when specified. Use the macro
+ * definitions from test_btf.h; the array must end with
+ * BTF_END_RAW.
+ */
+ __u32 btf_types[MAX_BTF_TYPES];
};
/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -387,6 +425,45 @@ static void bpf_fill_torturous_jumps(struct bpf_test *self)
}
}
+static void bpf_fill_big_prog_with_loop_1(struct bpf_test *self)
+{
+ struct bpf_insn *insn = self->fill_insns;
+ /* This test was added to catch a specific use after free
+ * error, which happened upon BPF program reallocation.
+ * Reallocation is handled by core.c:bpf_prog_realloc, which
+ * reuses old memory if page boundary is not crossed. The
+ * value of `len` is chosen to cross this boundary on bpf_loop
+ * patching.
+ */
+ const int len = getpagesize() - 25;
+ int callback_load_idx;
+ int callback_idx;
+ int i = 0;
+
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1);
+ callback_load_idx = i;
+ insn[i++] = BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW,
+ BPF_REG_2, BPF_PSEUDO_FUNC, 0,
+ 777 /* filled below */);
+ insn[i++] = BPF_RAW_INSN(0, 0, 0, 0, 0);
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0);
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0);
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop);
+
+ while (i < len - 3)
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
+ insn[i++] = BPF_EXIT_INSN();
+
+ callback_idx = i;
+ insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
+ insn[i++] = BPF_EXIT_INSN();
+
+ insn[callback_load_idx].imm = callback_idx - callback_load_idx - 1;
+ self->func_info[1].insn_off = callback_idx;
+ self->prog_len = i;
+ assert(i == len);
+}
+
/* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
#define BPF_SK_LOOKUP(func) \
/* struct bpf_sock_tuple tuple = {} */ \
@@ -621,8 +698,15 @@ static int create_cgroup_storage(bool percpu)
* struct timer {
* struct bpf_timer t;
* };
+ * struct btf_ptr {
+ * struct prog_test_ref_kfunc __kptr *ptr;
+ * struct prog_test_ref_kfunc __kptr_ref *ptr;
+ * struct prog_test_member __kptr_ref *ptr;
+ * }
*/
-static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
+ "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref"
+ "\0prog_test_member";
static __u32 btf_raw_types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -638,36 +722,84 @@ static __u32 btf_raw_types[] = {
/* struct timer */ /* [5] */
BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
+ /* struct prog_test_ref_kfunc */ /* [6] */
+ BTF_STRUCT_ENC(51, 0, 0),
+ BTF_STRUCT_ENC(89, 0, 0), /* [7] */
+ /* type tag "kptr" */
+ BTF_TYPE_TAG_ENC(75, 6), /* [8] */
+ /* type tag "kptr_ref" */
+ BTF_TYPE_TAG_ENC(80, 6), /* [9] */
+ BTF_TYPE_TAG_ENC(80, 7), /* [10] */
+ BTF_PTR_ENC(8), /* [11] */
+ BTF_PTR_ENC(9), /* [12] */
+ BTF_PTR_ENC(10), /* [13] */
+ /* struct btf_ptr */ /* [14] */
+ BTF_STRUCT_ENC(43, 3, 24),
+ BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
+ BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
+ BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
};
-static int load_btf(void)
+static char bpf_vlog[UINT_MAX >> 8];
+
+static int load_btf_spec(__u32 *types, int types_len,
+ const char *strings, int strings_len)
{
struct btf_header hdr = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
- .type_len = sizeof(btf_raw_types),
- .str_off = sizeof(btf_raw_types),
- .str_len = sizeof(btf_str_sec),
+ .type_len = types_len,
+ .str_off = types_len,
+ .str_len = strings_len,
};
void *ptr, *raw_btf;
int btf_fd;
+ LIBBPF_OPTS(bpf_btf_load_opts, opts,
+ .log_buf = bpf_vlog,
+ .log_size = sizeof(bpf_vlog),
+ .log_level = (verbose
+ ? VERBOSE_LIBBPF_LOG_LEVEL
+ : DEFAULT_LIBBPF_LOG_LEVEL),
+ );
- ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
- sizeof(btf_str_sec));
+ raw_btf = malloc(sizeof(hdr) + types_len + strings_len);
+ if (!raw_btf)
+ return -1;
+ ptr = raw_btf;
memcpy(ptr, &hdr, sizeof(hdr));
ptr += sizeof(hdr);
- memcpy(ptr, btf_raw_types, hdr.type_len);
+ memcpy(ptr, types, hdr.type_len);
ptr += hdr.type_len;
- memcpy(ptr, btf_str_sec, hdr.str_len);
+ memcpy(ptr, strings, hdr.str_len);
ptr += hdr.str_len;
- btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL);
- free(raw_btf);
+ btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, &opts);
if (btf_fd < 0)
- return -1;
- return btf_fd;
+ printf("Failed to load BTF spec: '%s'\n", strerror(errno));
+
+ free(raw_btf);
+
+ return btf_fd < 0 ? -1 : btf_fd;
+}
+
+static int load_btf(void)
+{
+ return load_btf_spec(btf_raw_types, sizeof(btf_raw_types),
+ btf_str_sec, sizeof(btf_str_sec));
+}
+
+static int load_btf_for_test(struct bpf_test *test)
+{
+ int types_num = 0;
+
+ while (types_num < MAX_BTF_TYPES &&
+ test->btf_types[types_num] != BTF_END_RAW)
+ ++types_num;
+
+ int types_len = types_num * sizeof(test->btf_types[0]);
+
+ return load_btf_spec(test->btf_types, types_len,
+ test->btf_strings, sizeof(test->btf_strings));
}
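+
+/* Note: test->btf_types must be terminated with BTF_END_RAW. A test
+ * would typically supply the spec like this (hypothetical sketch,
+ * mirroring the layout used by the bpf_loop inlining tests):
+ *
+ *	.btf_strings = "\0int\0main\0",
+ *	.btf_types = {
+ *		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ *		BTF_END_RAW,
+ *	},
+ */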
static int create_map_spin_lock(void)
@@ -727,7 +859,24 @@ static int create_map_timer(void)
return fd;
}
-static char bpf_vlog[UINT_MAX >> 8];
+static int create_map_kptr(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 14,
+ );
+ int fd, btf_fd;
+
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+
+ opts.btf_fd = btf_fd;
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
+ if (fd < 0)
+ printf("Failed to create map with btf_id pointer\n");
+ return fd;
+}
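+
+/* The value size of 24 above matches struct btf_ptr from the BTF blob:
+ * three 8-byte kptr fields at member bit offsets 0, 64 and 128, with
+ * btf_value_type_id = 14 pointing at that struct.
+ */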
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
struct bpf_insn *prog, int *map_fds)
@@ -754,6 +903,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
int *fixup_map_ringbuf = test->fixup_map_ringbuf;
int *fixup_map_timer = test->fixup_map_timer;
+ int *fixup_map_kptr = test->fixup_map_kptr;
struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;
if (test->fill_helper) {
@@ -947,6 +1097,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_timer++;
} while (*fixup_map_timer);
}
+ if (*fixup_map_kptr) {
+ map_fds[22] = create_map_kptr();
+ do {
+ prog[*fixup_map_kptr].imm = map_fds[22];
+ fixup_map_kptr++;
+ } while (*fixup_map_kptr);
+ }
/* Patch in kfunc BTF IDs */
if (fixup_kfunc_btf_id->kfunc) {
@@ -1075,10 +1232,218 @@ static bool cmp_str_seq(const char *log, const char *exp)
return true;
}
+static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ __u32 xlated_prog_len;
+ __u32 buf_element_size = sizeof(struct bpf_insn);
+
+ if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("bpf_obj_get_info_by_fd failed");
+ return -1;
+ }
+
+ xlated_prog_len = info.xlated_prog_len;
+ if (xlated_prog_len % buf_element_size) {
+ printf("Program length %d is not multiple of %d\n",
+ xlated_prog_len, buf_element_size);
+ return -1;
+ }
+
+ *cnt = xlated_prog_len / buf_element_size;
+ *buf = calloc(*cnt, buf_element_size);
+ if (!*buf) {
+ perror("can't allocate xlated program buffer");
+ return -ENOMEM;
+ }
+
+ bzero(&info, sizeof(info));
+ info.xlated_prog_len = xlated_prog_len;
+ info.xlated_prog_insns = (__u64)*buf;
+ if (bpf_obj_get_info_by_fd(fd_prog, &info, &info_len)) {
+ perror("second bpf_obj_get_info_by_fd failed");
+ goto out_free_buf;
+ }
+
+ return 0;
+
+out_free_buf:
+ free(*buf);
+ return -1;
+}
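+
+/* The function above uses the usual two-step bpf_obj_get_info_by_fd()
+ * protocol: the first call (with xlated_prog_len == 0) only reports the
+ * size of the translated program, the second call fills the buffer the
+ * caller allocated for that size.
+ */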
+
+static bool is_null_insn(struct bpf_insn *insn)
+{
+ struct bpf_insn null_insn = {};
+
+ return memcmp(insn, &null_insn, sizeof(null_insn)) == 0;
+}
+
+static bool is_skip_insn(struct bpf_insn *insn)
+{
+ struct bpf_insn skip_insn = SKIP_INSNS();
+
+ return memcmp(insn, &skip_insn, sizeof(skip_insn)) == 0;
+}
+
+static int null_terminated_insn_len(struct bpf_insn *seq, int max_len)
+{
+ int i;
+
+ for (i = 0; i < max_len; ++i) {
+ if (is_null_insn(&seq[i]))
+ return i;
+ }
+ return max_len;
+}
+
+static bool compare_masked_insn(struct bpf_insn *orig, struct bpf_insn *masked)
+{
+ struct bpf_insn orig_masked;
+
+ memcpy(&orig_masked, orig, sizeof(orig_masked));
+ if (masked->imm == INSN_IMM_MASK)
+ orig_masked.imm = INSN_IMM_MASK;
+ if (masked->off == INSN_OFF_MASK)
+ orig_masked.off = INSN_OFF_MASK;
+
+ return memcmp(&orig_masked, masked, sizeof(orig_masked)) == 0;
+}
+
+static int find_insn_subseq(struct bpf_insn *seq, struct bpf_insn *subseq,
+ int seq_len, int subseq_len)
+{
+ int i, j;
+
+ if (subseq_len > seq_len)
+ return -1;
+
+ for (i = 0; i < seq_len - subseq_len + 1; ++i) {
+ bool found = true;
+
+ for (j = 0; j < subseq_len; ++j) {
+ if (!compare_masked_insn(&seq[i + j], &subseq[j])) {
+ found = false;
+ break;
+ }
+ }
+ if (found)
+ return i;
+ }
+
+ return -1;
+}
+
+static int find_skip_insn_marker(struct bpf_insn *seq, int len)
+{
+ int i;
+
+ for (i = 0; i < len; ++i)
+ if (is_skip_insn(&seq[i]))
+ return i;
+
+ return -1;
+}
+
+/* Return true if all sub-sequences in `subseqs` can be found in
+ * `seq`, one after another. Sub-sequences are separated by a single
+ * null instruction.
+ */
+static bool find_all_insn_subseqs(struct bpf_insn *seq, struct bpf_insn *subseqs,
+ int seq_len, int max_subseqs_len)
+{
+ int subseqs_len = null_terminated_insn_len(subseqs, max_subseqs_len);
+
+ while (subseqs_len > 0) {
+ int skip_idx = find_skip_insn_marker(subseqs, subseqs_len);
+ int cur_subseq_len = skip_idx < 0 ? subseqs_len : skip_idx;
+ int subseq_idx = find_insn_subseq(seq, subseqs,
+ seq_len, cur_subseq_len);
+
+ if (subseq_idx < 0)
+ return false;
+ seq += subseq_idx + cur_subseq_len;
+ seq_len -= subseq_idx + cur_subseq_len;
+ subseqs += cur_subseq_len + 1;
+ subseqs_len -= cur_subseq_len + 1;
+ }
+
+ return true;
+}
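+
+/* For example (hypothetical), the following expected_insns spec matches
+ * any xlated program that contains the MOV somewhere before the EXIT,
+ * with an arbitrary gap in between:
+ *
+ *	.expected_insns = {
+ *		BPF_MOV64_IMM(BPF_REG_0, 0),
+ *		SKIP_INSNS(),
+ *		BPF_EXIT_INSN(),
+ *	},
+ */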
+
+static void print_insn(struct bpf_insn *buf, int cnt)
+{
+ int i;
+
+ printf(" addr op d s off imm\n");
+ for (i = 0; i < cnt; ++i) {
+ struct bpf_insn *insn = &buf[i];
+
+ if (is_null_insn(insn))
+ break;
+
+ if (is_skip_insn(insn))
+ printf(" ...\n");
+ else
+ printf(" %04x: %02x %1x %x %04hx %08x\n",
+ i, insn->code, insn->dst_reg,
+ insn->src_reg, insn->off, insn->imm);
+ }
+}
+
+static bool check_xlated_program(struct bpf_test *test, int fd_prog)
+{
+ struct bpf_insn *buf;
+ int cnt;
+ bool result = true;
+ bool check_expected = !is_null_insn(test->expected_insns);
+ bool check_unexpected = !is_null_insn(test->unexpected_insns);
+
+ if (!check_expected && !check_unexpected)
+ goto out;
+
+ if (get_xlated_program(fd_prog, &buf, &cnt)) {
+ printf("FAIL: can't get xlated program\n");
+ result = false;
+ goto out;
+ }
+
+ if (check_expected &&
+ !find_all_insn_subseqs(buf, test->expected_insns,
+ cnt, MAX_EXPECTED_INSNS)) {
+ printf("FAIL: can't find expected subsequence of instructions\n");
+ result = false;
+ if (verbose) {
+ printf("Program:\n");
+ print_insn(buf, cnt);
+ printf("Expected subsequence:\n");
+ print_insn(test->expected_insns, MAX_EXPECTED_INSNS);
+ }
+ }
+
+ if (check_unexpected &&
+ find_all_insn_subseqs(buf, test->unexpected_insns,
+ cnt, MAX_UNEXPECTED_INSNS)) {
+ printf("FAIL: found unexpected subsequence of instructions\n");
+ result = false;
+ if (verbose) {
+ printf("Program:\n");
+ print_insn(buf, cnt);
+ printf("Un-expected subsequence:\n");
+ print_insn(test->unexpected_insns, MAX_UNEXPECTED_INSNS);
+ }
+ }
+
+ free(buf);
+ out:
+ return result;
+}
+
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
- int fd_prog, expected_ret, alignment_prevented_execution;
+ int fd_prog, btf_fd, expected_ret, alignment_prevented_execution;
int prog_len, prog_type = test->prog_type;
struct bpf_insn *prog = test->insns;
LIBBPF_OPTS(bpf_prog_load_opts, opts);
@@ -1090,8 +1455,10 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
__u32 pflags;
int i, err;
+ fd_prog = -1;
for (i = 0; i < MAX_NR_MAPS; i++)
map_fds[i] = -1;
+ btf_fd = -1;
if (!prog_type)
prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
@@ -1124,11 +1491,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
opts.expected_attach_type = test->expected_attach_type;
if (verbose)
- opts.log_level = 1;
+ opts.log_level = VERBOSE_LIBBPF_LOG_LEVEL;
else if (expected_ret == VERBOSE_ACCEPT)
opts.log_level = 2;
else
- opts.log_level = 4;
+ opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL;
opts.prog_flags = pflags;
if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
@@ -1146,6 +1513,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
opts.attach_btf_id = attach_btf_id;
}
+ if (test->btf_types[0] != 0) {
+ btf_fd = load_btf_for_test(test);
+ if (btf_fd < 0)
+ goto fail_log;
+ opts.prog_btf_fd = btf_fd;
+ }
+
+ if (test->func_info_cnt != 0) {
+ opts.func_info = test->func_info;
+ opts.func_info_cnt = test->func_info_cnt;
+ opts.func_info_rec_size = sizeof(test->func_info[0]);
+ }
+
opts.log_buf = bpf_vlog;
opts.log_size = sizeof(bpf_vlog);
fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
@@ -1211,6 +1591,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (verbose)
printf(", verifier log:\n%s", bpf_vlog);
+ if (!check_xlated_program(test, fd_prog))
+ goto fail_log;
+
run_errs = 0;
run_successes = 0;
if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
@@ -1254,6 +1637,7 @@ close_fds:
if (test->fill_insns)
free(test->fill_insns);
close(fd_prog);
+ close(btf_fd);
for (i = 0; i < MAX_NR_MAPS; i++)
close(map_fds[i]);
sched_yield();
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
index 8d6918c3b4a2..70feda97cee5 100644
--- a/tools/testing/selftests/bpf/test_verifier_log.c
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -11,8 +11,6 @@
#include <bpf/bpf.h>
-#include "bpf_rlimit.h"
-
#define LOG_SIZE (1 << 20)
#define err(str...) printf("ERROR: " str)
@@ -141,6 +139,9 @@ int main(int argc, char **argv)
memset(log, 1, LOG_SIZE);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
/* Test incorrect attr */
printf("Test log_level 0...\n");
test_log_bad(log, LOG_SIZE, 0);
diff --git a/tools/testing/selftests/bpf/test_xdp_veth.sh b/tools/testing/selftests/bpf/test_xdp_veth.sh
index 392d28cc4e58..49936c4c8567 100755
--- a/tools/testing/selftests/bpf/test_xdp_veth.sh
+++ b/tools/testing/selftests/bpf/test_xdp_veth.sh
@@ -106,9 +106,9 @@ bpftool prog loadall \
bpftool map update pinned $BPF_DIR/maps/tx_port key 0 0 0 0 value 122 0 0 0
bpftool map update pinned $BPF_DIR/maps/tx_port key 1 0 0 0 value 133 0 0 0
bpftool map update pinned $BPF_DIR/maps/tx_port key 2 0 0 0 value 111 0 0 0
-ip link set dev veth1 xdp pinned $BPF_DIR/progs/redirect_map_0
-ip link set dev veth2 xdp pinned $BPF_DIR/progs/redirect_map_1
-ip link set dev veth3 xdp pinned $BPF_DIR/progs/redirect_map_2
+ip link set dev veth1 xdp pinned $BPF_DIR/progs/xdp_redirect_map_0
+ip link set dev veth2 xdp pinned $BPF_DIR/progs/xdp_redirect_map_1
+ip link set dev veth3 xdp pinned $BPF_DIR/progs/xdp_redirect_map_2
ip -n ${NS1} link set dev veth11 xdp obj xdp_dummy.o sec xdp
ip -n ${NS2} link set dev veth22 xdp obj xdp_tx.o sec xdp
diff --git a/tools/testing/selftests/bpf/test_xdping.sh b/tools/testing/selftests/bpf/test_xdping.sh
index c2f0ddb45531..c3d82e0a7378 100755
--- a/tools/testing/selftests/bpf/test_xdping.sh
+++ b/tools/testing/selftests/bpf/test_xdping.sh
@@ -95,5 +95,9 @@ for server_args in "" "-I veth0 -s -S" ; do
test "$client_args" "$server_args"
done
+# Test drv mode
+test "-I veth1 -N" "-I veth0 -s -N"
+test "-I veth1 -N -c 10" "-I veth0 -s -N"
+
echo "OK. All tests passed"
exit 0
diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
index cd7bf32e6a17..096a957594cd 100755
--- a/tools/testing/selftests/bpf/test_xsk.sh
+++ b/tools/testing/selftests/bpf/test_xsk.sh
@@ -43,12 +43,11 @@
# ** veth<xxxx> in root namespace
# ** veth<yyyy> in af_xdp<xxxx> namespace
# ** namespace af_xdp<xxxx>
-# * create a spec file veth.spec that includes this run-time configuration
# *** xxxx and yyyy are randomly generated 4 digit numbers used to avoid
# conflict with any existing interface
# * tests the veth and xsk layers of the topology
#
-# See the source xdpxceiver.c for information on each test
+# See the source xskxceiver.c for information on each test
#
# Kernel configuration:
# ---------------------
@@ -77,7 +76,7 @@
. xsk_prereqs.sh
-while getopts "cvD" flag
+while getopts "vD" flag
do
case "${flag}" in
v) verbose=1;;
@@ -88,7 +87,7 @@ done
TEST_NAME="PREREQUISITES"
URANDOM=/dev/urandom
-[ ! -e "${URANDOM}" ] && { echo "${URANDOM} not found. Skipping tests."; test_exit 1 1; }
+[ ! -e "${URANDOM}" ] && { echo "${URANDOM} not found. Skipping tests."; test_exit $ksft_fail; }
VETH0_POSTFIX=$(cat ${URANDOM} | tr -dc '0-9' | fold -w 256 | head -n 1 | head --bytes 4)
VETH0=ve${VETH0_POSTFIX}
@@ -98,6 +97,13 @@ NS0=root
NS1=af_xdp${VETH1_POSTFIX}
MTU=1500
+trap ctrl_c INT
+
+function ctrl_c() {
+ cleanup_exit ${VETH0} ${VETH1} ${NS1}
+ exit 1
+}
+
setup_vethPairs() {
if [[ $verbose -eq 1 ]]; then
echo "setting up ${VETH0}: namespace: ${NS0}"
@@ -110,6 +116,14 @@ setup_vethPairs() {
if [[ $verbose -eq 1 ]]; then
echo "setting up ${VETH1}: namespace: ${NS1}"
fi
+
+ if [[ $busy_poll -eq 1 ]]; then
+ echo 2 > /sys/class/net/${VETH0}/napi_defer_hard_irqs
+ echo 200000 > /sys/class/net/${VETH0}/gro_flush_timeout
+ echo 2 > /sys/class/net/${VETH1}/napi_defer_hard_irqs
+ echo 200000 > /sys/class/net/${VETH1}/gro_flush_timeout
+ fi
+
ip link set ${VETH1} netns ${NS1}
ip netns exec ${NS1} ip link set ${VETH1} mtu ${MTU}
ip link set ${VETH0} mtu ${MTU}
@@ -130,17 +144,12 @@ if [ $retval -ne 0 ]; then
exit $retval
fi
-echo "${VETH0}:${VETH1},${NS1}" > ${SPECFILE}
-
-validate_veth_spec_file
-
if [[ $verbose -eq 1 ]]; then
- echo "Spec file created: ${SPECFILE}"
- VERBOSE_ARG="-v"
+ ARGS+="-v "
fi
if [[ $dump_pkts -eq 1 ]]; then
- DUMP_PKTS_ARG="-D"
+ ARGS="-D "
fi
test_status $retval "${TEST_NAME}"
@@ -149,23 +158,31 @@ test_status $retval "${TEST_NAME}"
statusList=()
-TEST_NAME="XSK KSELFTESTS"
+TEST_NAME="XSK_SELFTESTS_SOFTIRQ"
-execxdpxceiver
+exec_xskxceiver
-retval=$?
-test_status $retval "${TEST_NAME}"
-statusList+=($retval)
+cleanup_exit ${VETH0} ${VETH1} ${NS1}
+TEST_NAME="XSK_SELFTESTS_BUSY_POLL"
+busy_poll=1
+
+setup_vethPairs
+exec_xskxceiver
## END TESTS
cleanup_exit ${VETH0} ${VETH1} ${NS1}
-for _status in "${statusList[@]}"
+failures=0
+echo -e "\nSummary:"
+for i in "${!statusList[@]}"
do
- if [ $_status -ne 0 ]; then
- test_exit $ksft_fail 0
+ if [ ${statusList[$i]} -ne 0 ]; then
+ test_status ${statusList[$i]} ${nameList[$i]}
+ failures=1
fi
done
-test_exit $ksft_pass 0
+if [ $failures -eq 0 ]; then
+ echo "All tests successful!"
+fi
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 795b6798ccee..9695318e8132 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -6,6 +6,7 @@
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "test_progs.h"
#include "testing_helpers.h"
int parse_num_list(const char *s, bool **num_set, int *num_set_len)
@@ -60,7 +61,7 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
set[i] = true;
}
- if (!set)
+ if (!set || parsing_end)
return -EINVAL;
*num_set = set;
@@ -69,6 +70,94 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
return 0;
}
+int parse_test_list(const char *s,
+ struct test_filter_set *set,
+ bool is_glob_pattern)
+{
+ char *input, *state = NULL, *next;
+ struct test_filter *tmp, *tests = NULL;
+ int i, j, cnt = 0;
+
+ input = strdup(s);
+ if (!input)
+ return -ENOMEM;
+
+ while ((next = strtok_r(state ? NULL : input, ",", &state))) {
+ char *subtest_str = strchr(next, '/');
+ char *pattern = NULL;
+ int glob_chars = 0;
+
+ tmp = realloc(tests, sizeof(*tests) * (cnt + 1));
+ if (!tmp)
+ goto err;
+ tests = tmp;
+
+ tests[cnt].subtest_cnt = 0;
+ tests[cnt].subtests = NULL;
+
+ if (is_glob_pattern) {
+ pattern = "%s";
+ } else {
+ pattern = "*%s*";
+ glob_chars = 2;
+ }
+
+ if (subtest_str) {
+ char **tmp_subtests = NULL;
+ int subtest_cnt = tests[cnt].subtest_cnt;
+
+ *subtest_str = '\0';
+ subtest_str += 1;
+ tmp_subtests = realloc(tests[cnt].subtests,
+ sizeof(*tmp_subtests) *
+ (subtest_cnt + 1));
+ if (!tmp_subtests)
+ goto err;
+ tests[cnt].subtests = tmp_subtests;
+
+ tests[cnt].subtests[subtest_cnt] =
+ malloc(strlen(subtest_str) + glob_chars + 1);
+ if (!tests[cnt].subtests[subtest_cnt])
+ goto err;
+ sprintf(tests[cnt].subtests[subtest_cnt],
+ pattern,
+ subtest_str);
+
+ tests[cnt].subtest_cnt++;
+ }
+
+ tests[cnt].name = malloc(strlen(next) + glob_chars + 1);
+ if (!tests[cnt].name)
+ goto err;
+ sprintf(tests[cnt].name, pattern, next);
+
+ cnt++;
+ }
+
+ tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt));
+ if (!tmp)
+ goto err;
+
+ memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt);
+ set->tests = tmp;
+ set->cnt += cnt;
+
+ free(tests);
+ free(input);
+ return 0;
+
+err:
+ for (i = 0; i < cnt; i++) {
+ for (j = 0; j < tests[i].subtest_cnt; j++)
+ free(tests[i].subtests[j]);
+
+ free(tests[i].name);
+ }
+ free(tests);
+ free(input);
+ return -ENOMEM;
+}
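+
+/* For example, parse_test_list("foo/bar,baz", &set, false) adds two
+ * entries to `set`: name "*foo*" with subtest "*bar*", and name "*baz*"
+ * (non-glob input is wrapped in '*' to get substring matching).
+ */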
+
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
{
__u32 info_len = sizeof(*info);
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index f46ebc476ee8..6ec00bf79cb5 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -12,3 +12,11 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
__u32 kern_version, char *log_buf,
size_t log_buf_sz);
+
+/*
+ * The function below is exported so it can be exercised from the
+ * prog_tests framework.
+ */
+struct test_filter_set;
+int parse_test_list(const char *s,
+ struct test_filter_set *test_set,
+ bool is_glob_pattern);
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 3d6217e3aff7..9c4be2cdb21a 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -25,15 +25,12 @@ static int ksym_cmp(const void *p1, const void *p2)
int load_kallsyms(void)
{
- FILE *f = fopen("/proc/kallsyms", "r");
+ FILE *f;
char func[256], buf[256];
char symbol;
void *addr;
int i = 0;
- if (!f)
- return -ENOENT;
-
/*
* This is called/used from multiple places,
* load symbols just once.
@@ -41,6 +38,10 @@ int load_kallsyms(void)
if (sym_cnt)
return 0;
+ f = fopen("/proc/kallsyms", "r");
+ if (!f)
+ return -ENOENT;
+
while (fgets(buf, sizeof(buf), f)) {
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
diff --git a/tools/testing/selftests/bpf/urandom_read.c b/tools/testing/selftests/bpf/urandom_read.c
index db781052758d..e92644d0fa75 100644
--- a/tools/testing/selftests/bpf/urandom_read.c
+++ b/tools/testing/selftests/bpf/urandom_read.c
@@ -1,32 +1,85 @@
+#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
+#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
+#include <signal.h>
+
+#define _SDT_HAS_SEMAPHORES 1
+#include "sdt.h"
+
+#define SEC(name) __attribute__((section(name), used))
#define BUF_SIZE 256
+/* defined in urandom_read_aux.c */
+void urand_read_without_sema(int iter_num, int iter_cnt, int read_sz);
+/* these are coming from urandom_read_lib{1,2}.c */
+void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz);
+void urandlib_read_without_sema(int iter_num, int iter_cnt, int read_sz);
+
+unsigned short urand_read_with_sema_semaphore SEC(".probes");
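+/* With _SDT_HAS_SEMAPHORES defined, sdt.h records the address of this
+ * counter in the USDT ELF note; a tracer (e.g. libbpf's usdt support)
+ * increments it while attached, which is what the semaphore-based tests
+ * observe. The <provider>_<probe>_semaphore naming is the sdt.h
+ * convention.
+ */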
+
static __attribute__((noinline))
void urandom_read(int fd, int count)
{
- char buf[BUF_SIZE];
- int i;
+ char buf[BUF_SIZE];
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ read(fd, buf, BUF_SIZE);
+
+ /* trigger USDTs defined in executable itself */
+ urand_read_without_sema(i, count, BUF_SIZE);
+ STAP_PROBE3(urand, read_with_sema, i, count, BUF_SIZE);
- for (i = 0; i < count; ++i)
- read(fd, buf, BUF_SIZE);
+ /* trigger USDTs defined in shared lib */
+ urandlib_read_without_sema(i, count, BUF_SIZE);
+ urandlib_read_with_sema(i, count, BUF_SIZE);
+ }
+}
+
+static volatile bool parent_ready;
+
+static void handle_sigpipe(int sig)
+{
+ parent_ready = true;
}
int main(int argc, char *argv[])
{
int fd = open("/dev/urandom", O_RDONLY);
int count = 4;
+ bool report_pid = false;
if (fd < 0)
return 1;
- if (argc == 2)
+ if (argc >= 2)
count = atoi(argv[1]);
+ if (argc >= 3) {
+ report_pid = true;
+ /* install SIGPIPE handler to catch when parent closes their
+ * end of the pipe (on the other side of our stdout)
+ */
+ signal(SIGPIPE, handle_sigpipe);
+ }
+
+ /* report PID and wait for parent process to send us "signal" by
+ * closing stdout
+ */
+ if (report_pid) {
+ while (!parent_ready) {
+ fprintf(stdout, "%d\n", getpid());
+ fflush(stdout);
+ }
+ /* at this point stdout is closed, parent process knows our
+ * PID and is ready to trace us
+ */
+ }
urandom_read(fd, count);
diff --git a/tools/testing/selftests/bpf/urandom_read_aux.c b/tools/testing/selftests/bpf/urandom_read_aux.c
new file mode 100644
index 000000000000..6132edcfea74
--- /dev/null
+++ b/tools/testing/selftests/bpf/urandom_read_aux.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include "sdt.h"
+
+void urand_read_without_sema(int iter_num, int iter_cnt, int read_sz)
+{
+ /* semaphore-less USDT */
+ STAP_PROBE3(urand, read_without_sema, iter_num, iter_cnt, read_sz);
+}
diff --git a/tools/testing/selftests/bpf/urandom_read_lib1.c b/tools/testing/selftests/bpf/urandom_read_lib1.c
new file mode 100644
index 000000000000..86186e24b740
--- /dev/null
+++ b/tools/testing/selftests/bpf/urandom_read_lib1.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#define _SDT_HAS_SEMAPHORES 1
+#include "sdt.h"
+
+#define SEC(name) __attribute__((section(name), used))
+
+unsigned short urandlib_read_with_sema_semaphore SEC(".probes");
+
+void urandlib_read_with_sema(int iter_num, int iter_cnt, int read_sz)
+{
+ STAP_PROBE3(urandlib, read_with_sema, iter_num, iter_cnt, read_sz);
+}
diff --git a/tools/testing/selftests/bpf/urandom_read_lib2.c b/tools/testing/selftests/bpf/urandom_read_lib2.c
new file mode 100644
index 000000000000..9d401ad9838f
--- /dev/null
+++ b/tools/testing/selftests/bpf/urandom_read_lib2.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include "sdt.h"
+
+void urandlib_read_without_sema(int iter_num, int iter_cnt, int read_sz)
+{
+ STAP_PROBE3(urandlib, read_without_sema, iter_num, iter_cnt, read_sz);
+}
diff --git a/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
new file mode 100644
index 000000000000..a535d41dc20d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c
@@ -0,0 +1,264 @@
+#define BTF_TYPES \
+ .btf_strings = "\0int\0i\0ctx\0callback\0main\0", \
+ .btf_types = { \
+ /* 1: int */ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), \
+ /* 2: int* */ BTF_PTR_ENC(1), \
+ /* 3: void* */ BTF_PTR_ENC(0), \
+ /* 4: int __(void*) */ BTF_FUNC_PROTO_ENC(1, 1), \
+ BTF_FUNC_PROTO_ARG_ENC(7, 3), \
+ /* 5: int __(int, int*) */ BTF_FUNC_PROTO_ENC(1, 2), \
+ BTF_FUNC_PROTO_ARG_ENC(5, 1), \
+ BTF_FUNC_PROTO_ARG_ENC(7, 2), \
+ /* 6: main */ BTF_FUNC_ENC(20, 4), \
+ /* 7: callback */ BTF_FUNC_ENC(11, 5), \
+ BTF_END_RAW \
+ }
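+/* String offsets above index into "\0int\0i\0ctx\0callback\0main\0":
+ * 1 = "int", 5 = "i", 7 = "ctx", 11 = "callback", 20 = "main".
+ */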
+
+#define MAIN_TYPE 6
+#define CALLBACK_TYPE 7
+
+/* can't use BPF_CALL_REL, jit_subprogs adjusts IMM & OFF
+ * fields for pseudo calls
+ */
+#define PSEUDO_CALL_INSN() \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, \
+ INSN_OFF_MASK, INSN_IMM_MASK)
+
+/* can't use BPF_FUNC_loop constant,
+ * do_misc_fixups adjusts the IMM field
+ */
+#define HELPER_CALL_INSN() \
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, INSN_OFF_MASK, INSN_IMM_MASK)
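+/* INSN_OFF_MASK/INSN_IMM_MASK act as wildcards: compare_masked_insn()
+ * in test_verifier.c copies the masked fields into the instruction
+ * under test before memcmp(), so any off/imm value matches.
+ */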
+
+{
+ "inline simple bpf_loop call",
+ .insns = {
+ /* main */
+ /* force verifier state branching to verify logic on first and
+ * subsequent bpf_loop insn processing steps
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 2),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
+
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* callback */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_insns = { PSEUDO_CALL_INSN() },
+ .unexpected_insns = { HELPER_CALL_INSN() },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ .runs = 0,
+ .func_info = { { 0, MAIN_TYPE }, { 12, CALLBACK_TYPE } },
+ .func_info_cnt = 2,
+ BTF_TYPES
+},
+{
+ "don't inline bpf_loop call, flags non-zero",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 9),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 7),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -10),
+ /* callback */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_insns = { HELPER_CALL_INSN() },
+ .unexpected_insns = { PSEUDO_CALL_INSN() },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ .runs = 0,
+ .func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
+ .func_info_cnt = 2,
+ BTF_TYPES
+},
+{
+ "don't inline bpf_loop call, callback non-constant",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 777, 4), /* pick a random callback */
+
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 10),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* callback */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* callback #2 */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_insns = { HELPER_CALL_INSN() },
+ .unexpected_insns = { PSEUDO_CALL_INSN() },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ .runs = 0,
+ .func_info = {
+ { 0, MAIN_TYPE },
+ { 14, CALLBACK_TYPE },
+ { 16, CALLBACK_TYPE }
+ },
+ .func_info_cnt = 3,
+ BTF_TYPES
+},
+{
+ "bpf_loop_inline and a dead func",
+ .insns = {
+ /* main */
+
+ /* A reference to callback #1 to make verifier count it as a func.
+ * This reference is overwritten below and callback #1 is dead.
+ */
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 9),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 8),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* callback */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* callback #2 */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_insns = { PSEUDO_CALL_INSN() },
+ .unexpected_insns = { HELPER_CALL_INSN() },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ .runs = 0,
+ .func_info = {
+ { 0, MAIN_TYPE },
+ { 10, CALLBACK_TYPE },
+ { 12, CALLBACK_TYPE }
+ },
+ .func_info_cnt = 3,
+ BTF_TYPES
+},
+{
+ "bpf_loop_inline stack locations for loop vars",
+ .insns = {
+ /* main */
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77),
+ /* bpf_loop call #1 */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 22),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
+ /* bpf_loop call #2 */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 16),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
+ /* call func and exit */
+ BPF_CALL_REL(2),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* func */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_1, 2),
+ BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2, BPF_PSEUDO_FUNC, 0, 6),
+ BPF_RAW_INSN(0, 0, 0, 0, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_loop),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* callback */
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .expected_insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -12, 0x77),
+ SKIP_INSNS(),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24),
+ SKIP_INSNS(),
+ /* offsets are the same as in the first call */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -40),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -24),
+ SKIP_INSNS(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -32, 0x55),
+ SKIP_INSNS(),
+ /* offsets differ from main's because the BPF_ST_MEM above
+ * writes at a different stack offset
+ */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -56),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, -48),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, -40),
+ },
+ .unexpected_insns = { HELPER_CALL_INSN() },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .result = ACCEPT,
+ .func_info = {
+ { 0, MAIN_TYPE },
+ { 16, MAIN_TYPE },
+ { 25, CALLBACK_TYPE },
+ },
+ .func_info_cnt = 3,
+ BTF_TYPES
+},
+{
+ "inline bpf_loop call in a big program",
+ .insns = {},
+ .fill_helper = bpf_fill_big_prog_with_loop_1,
+ .expected_insns = { PSEUDO_CALL_INSN() },
+ .unexpected_insns = { HELPER_CALL_INSN() },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } },
+ .func_info_cnt = 2,
+ BTF_TYPES
+},
+
+#undef HELPER_CALL_INSN
+#undef PSEUDO_CALL_INSN
+#undef CALLBACK_TYPE
+#undef MAIN_TYPE
+#undef BTF_TYPES
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 2e03decb11b6..3fb4f69b1962 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -139,6 +139,26 @@
},
},
{
+ "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_memb_acquire", 1 },
+ { "bpf_kfunc_call_memb1_release", 5 },
+ },
+},
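+/* In the test above, fixup_kfunc_btf_id patches the kfunc BTF ids into
+ * the two BPF_PSEUDO_KFUNC_CALL instructions at indexes 1 and 5, so the
+ * acquire/release pair can be named symbolically.
+ */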
+{
"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
@@ -199,6 +219,59 @@
.errstr = "variable ptr_ access var_off=(0x0; 0x7) disallowed",
},
{
+ "calls: invalid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 3 },
+ { "bpf_kfunc_call_test_ref", 8 },
+ { "bpf_kfunc_call_test_ref", 10 },
+ },
+ .result_unpriv = REJECT,
+ .result = REJECT,
+ .errstr = "R1 must be referenced",
+},
+{
+ "calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 3 },
+ { "bpf_kfunc_call_test_ref", 8 },
+ { "bpf_kfunc_call_test_release", 10 },
+ },
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+},
+{
"calls: basic sanity",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index 6ddc418fdfaf..1a27a6210554 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -864,3 +864,24 @@
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+{
+ "jeq32/jne32: bounds checking",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_6, 563),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0),
+ BPF_ALU32_REG(BPF_OR, BPF_REG_2, BPF_REG_6),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_2, 8, 5),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_2, 500, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
+},
diff --git a/tools/testing/selftests/bpf/verifier/jump.c b/tools/testing/selftests/bpf/verifier/jump.c
index 6f951d1ff0a4..497fe17d2eaf 100644
--- a/tools/testing/selftests/bpf/verifier/jump.c
+++ b/tools/testing/selftests/bpf/verifier/jump.c
@@ -373,3 +373,25 @@
.result = ACCEPT,
.retval = 3,
},
+{
+ "jump & dead code elimination",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_3, 0),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 32767),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_3, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0x8000, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -32767),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 0, 1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c
new file mode 100644
index 000000000000..6914904344c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_kptr.c
@@ -0,0 +1,469 @@
+/* Common tests */
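+/* Each test below starts with the same preamble: load the kptr map fd
+ * (patched in at insn 1 via fixup_map_kptr), look up key 0 from the
+ * stack, and bail out if the lookup returned NULL; R0 then points at
+ * the map value holding the kptr fields.
+ */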
+{
+ "map_kptr: BPF_ST imm != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "BPF_ST imm must be 0 when storing to kptr at off=0",
+},
+{
+ "map_kptr: size != bpf_size_to_bytes(BPF_DW)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access size must be BPF_DW",
+},
+{
+ "map_kptr: map_value non-const var_off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access cannot have variable offset",
+},
+{
+ "map_kptr: bpf_kptr_xchg non-const var_off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 doesn't have constant offset. kptr has to be at the constant offset",
+},
+{
+ "map_kptr: unaligned boundary load/store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access misaligned expected=0 off=7",
+},
+{
+ "map_kptr: reject var_off != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "variable untrusted_ptr_ access var_off=(0x0; 0x7) disallowed",
+},
+/* Tests for unreferenced PTR_TO_BTF_ID */
+{
+ "map_kptr: unref: reject btf_struct_ids_match == false",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc expected=ptr_prog_test",
+},
+{
+ "map_kptr: unref: loaded pointer marked as untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'untrusted_ptr_or_null_'",
+},
+{
+ "map_kptr: unref: correct in kernel type size",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "access beyond struct prog_test_ref_kfunc at off 32 size 8",
+},
+{
+ "map_kptr: unref: inherit PTR_UNTRUSTED on struct walk",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 type=untrusted_ptr_ expected=percpu_ptr_",
+},
+{
+ "map_kptr: unref: no reference state created",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = ACCEPT,
+},
+{
+ "map_kptr: unref: bpf_kptr_xchg rejected",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "off=0 kptr isn't referenced kptr",
+},
+{
+ "map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "arg#0 no referenced kptr at map value offset=0",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_kptr_get", 13 },
+ }
+},
+/* Tests for referenced PTR_TO_BTF_ID */
+{
+ "map_kptr: ref: loaded pointer marked as untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_",
+},
+{
+ "map_kptr: ref: reject off != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member",
+},
+{
+ "map_kptr: ref: reference state created and released on xchg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "Unreleased reference id=5 alloc_insn=20",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 15 },
+ }
+},
+{
+ "map_kptr: ref: reject STX",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "store to referenced kptr disallowed",
+},
+{
+ "map_kptr: ref: reject ST",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "store to referenced kptr disallowed",
+},
+{
+ "map_kptr: reject helper access to kptr",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr cannot be accessed indirectly by helper",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index fbd682520e47..57a83d763ec1 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -796,7 +796,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 86b24cad27a7..d11d0b28be41 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -417,7 +417,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"bpf_sk_release(bpf_sk_fullsock(skb->sk))",
@@ -436,7 +436,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"bpf_sk_release(bpf_tcp_sock(skb->sk))",
@@ -455,7 +455,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"sk_storage_get(map, skb->sk, NULL, 0): value == NULL",
diff --git a/tools/testing/selftests/bpf/vmtest.sh b/tools/testing/selftests/bpf/vmtest.sh
index e0bb04a97e10..b86ae4a2e5c5 100755
--- a/tools/testing/selftests/bpf/vmtest.sh
+++ b/tools/testing/selftests/bpf/vmtest.sh
@@ -30,8 +30,7 @@ DEFAULT_COMMAND="./test_progs"
MOUNT_DIR="mnt"
ROOTFS_IMAGE="root.img"
OUTPUT_DIR="$HOME/.bpf_selftests"
-KCONFIG_URL="https://raw.githubusercontent.com/libbpf/libbpf/master/travis-ci/vmtest/configs/config-latest.${ARCH}"
-KCONFIG_API_URL="https://api.github.com/repos/libbpf/libbpf/contents/travis-ci/vmtest/configs/config-latest.${ARCH}"
+KCONFIG_REL_PATHS=("tools/testing/selftests/bpf/config" "tools/testing/selftests/bpf/config.${ARCH}")
INDEX_URL="https://raw.githubusercontent.com/libbpf/ci/master/INDEX"
NUM_COMPILE_JOBS="$(nproc)"
LOG_FILE_BASE="$(date +"bpf_selftests.%Y-%m-%d_%H-%M-%S")"
@@ -269,26 +268,42 @@ is_rel_path()
[[ ${path:0:1} != "/" ]]
}
+do_update_kconfig()
+{
+ local kernel_checkout="$1"
+ local kconfig_file="$2"
+
+ rm -f "$kconfig_file" 2> /dev/null
+
+ for config in "${KCONFIG_REL_PATHS[@]}"; do
+ local kconfig_src="${kernel_checkout}/${config}"
+ cat "$kconfig_src" >> "$kconfig_file"
+ done
+}
+
update_kconfig()
{
- local kconfig_file="$1"
- local update_command="curl -sLf ${KCONFIG_URL} -o ${kconfig_file}"
- # Github does not return the "last-modified" header when retrieving the
- # raw contents of the file. Use the API call to get the last-modified
- # time of the kernel config and only update the config if it has been
- # updated after the previously cached config was created. This avoids
- # unnecessarily compiling the kernel and selftests.
- if [[ -f "${kconfig_file}" ]]; then
- local last_modified_date="$(curl -sL -D - "${KCONFIG_API_URL}" -o /dev/null | \
- grep "last-modified" | awk -F ': ' '{print $2}')"
- local remote_modified_timestamp="$(date -d "${last_modified_date}" +"%s")"
- local local_creation_timestamp="$(stat -c %Y "${kconfig_file}")"
+ local kernel_checkout="$1"
+ local kconfig_file="$2"
- if [[ "${remote_modified_timestamp}" -gt "${local_creation_timestamp}" ]]; then
- ${update_command}
- fi
+ if [[ -f "${kconfig_file}" ]]; then
+ local local_modified="$(stat -c %Y "${kconfig_file}")"
+
+ for config in "${KCONFIG_REL_PATHS[@]}"; do
+ local kconfig_src="${kernel_checkout}/${config}"
+ local src_modified="$(stat -c %Y "${kconfig_src}")"
+ # Only update the config if it has been updated after the
+ # previously cached config was created. This avoids
+ # unnecessarily compiling the kernel and selftests.
+ if [[ "${src_modified}" -gt "${local_modified}" ]]; then
+ do_update_kconfig "$kernel_checkout" "$kconfig_file"
+ # Once we have found one outdated configuration
+ # there is no need to check other ones.
+ break
+ fi
+ done
else
- ${update_command}
+ do_update_kconfig "$kernel_checkout" "$kconfig_file"
fi
}
@@ -372,7 +387,7 @@ main()
mkdir -p "${OUTPUT_DIR}"
mkdir -p "${mount_dir}"
- update_kconfig "${kconfig_file}"
+ update_kconfig "${kernel_checkout}" "${kconfig_file}"
recompile_kernel "${kernel_checkout}" "${make_command}"
diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c
index aaedbf4955c3..c03b3a75991f 100644
--- a/tools/testing/selftests/bpf/xdp_redirect_multi.c
+++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c
@@ -10,7 +10,6 @@
#include <net/if.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
new file mode 100644
index 000000000000..d874ddfb39c4
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_synproxy.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: LGPL-2.1 OR BSD-2-Clause
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <stdnoreturn.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <net/if.h>
+#include <linux/if_link.h>
+#include <linux/limits.h>
+
+static unsigned int ifindex;
+static __u32 attached_prog_id;
+static bool attached_tc;
+
+static noreturn void cleanup(int sig)
+{
+ LIBBPF_OPTS(bpf_xdp_attach_opts, opts);
+ int prog_fd;
+ int err;
+
+ if (attached_prog_id == 0)
+ exit(0);
+
+ if (attached_tc) {
+ LIBBPF_OPTS(bpf_tc_hook, hook,
+ .ifindex = ifindex,
+ .attach_point = BPF_TC_INGRESS);
+
+ err = bpf_tc_hook_destroy(&hook);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_tc_hook_destroy: %s\n", strerror(-err));
+ fprintf(stderr, "Failed to destroy the TC hook\n");
+ exit(1);
+ }
+ exit(0);
+ }
+
+ prog_fd = bpf_prog_get_fd_by_id(attached_prog_id);
+ if (prog_fd < 0) {
+ fprintf(stderr, "Error: bpf_prog_get_fd_by_id: %s\n", strerror(-prog_fd));
+ err = bpf_xdp_attach(ifindex, -1, 0, NULL);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_set_link_xdp_fd: %s\n", strerror(-err));
+ fprintf(stderr, "Failed to detach XDP program\n");
+ exit(1);
+ }
+ } else {
+ opts.old_prog_fd = prog_fd;
+ err = bpf_xdp_attach(ifindex, -1, XDP_FLAGS_REPLACE, &opts);
+ close(prog_fd);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_set_link_xdp_fd_opts: %s\n", strerror(-err));
+ /* Not an error if already replaced by someone else. */
+ if (err != -EEXIST) {
+ fprintf(stderr, "Failed to detach XDP program\n");
+ exit(1);
+ }
+ }
+ }
+ exit(0);
+}
+
+static noreturn void usage(const char *progname)
+{
+ fprintf(stderr, "Usage: %s [--iface <iface>|--prog <prog_id>] [--mss4 <mss ipv4> --mss6 <mss ipv6> --wscale <wscale> --ttl <ttl>] [--ports <port1>,<port2>,...] [--single] [--tc]\n",
+ progname);
+ exit(1);
+}
+
+static unsigned long parse_arg_ul(const char *progname, const char *arg, unsigned long limit)
+{
+ unsigned long res;
+ char *endptr;
+
+ errno = 0;
+ res = strtoul(arg, &endptr, 10);
+ if (errno != 0 || *endptr != '\0' || arg[0] == '\0' || res > limit)
+ usage(progname);
+
+ return res;
+}
+
+static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *prog_id,
+ __u64 *tcpipopts, char **ports, bool *single, bool *tc)
+{
+ static struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "iface", required_argument, NULL, 'i' },
+ { "prog", required_argument, NULL, 'x' },
+ { "mss4", required_argument, NULL, 4 },
+ { "mss6", required_argument, NULL, 6 },
+ { "wscale", required_argument, NULL, 'w' },
+ { "ttl", required_argument, NULL, 't' },
+ { "ports", required_argument, NULL, 'p' },
+ { "single", no_argument, NULL, 's' },
+ { "tc", no_argument, NULL, 'c' },
+ { NULL, 0, NULL, 0 },
+ };
+ unsigned long mss4, mss6, wscale, ttl;
+ unsigned int tcpipopts_mask = 0;
+
+ if (argc < 2)
+ usage(argv[0]);
+
+ *ifindex = 0;
+ *prog_id = 0;
+ *tcpipopts = 0;
+ *ports = NULL;
+ *single = false;
+ *tc = false;
+
+ while (true) {
+ int opt;
+
+ opt = getopt_long(argc, argv, "", long_options, NULL);
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'h':
+ usage(argv[0]);
+ break;
+ case 'i':
+ *ifindex = if_nametoindex(optarg);
+ if (*ifindex == 0)
+ usage(argv[0]);
+ break;
+ case 'x':
+ *prog_id = parse_arg_ul(argv[0], optarg, UINT32_MAX);
+ if (*prog_id == 0)
+ usage(argv[0]);
+ break;
+ case 4:
+ mss4 = parse_arg_ul(argv[0], optarg, UINT16_MAX);
+ tcpipopts_mask |= 1 << 0;
+ break;
+ case 6:
+ mss6 = parse_arg_ul(argv[0], optarg, UINT16_MAX);
+ tcpipopts_mask |= 1 << 1;
+ break;
+ case 'w':
+ wscale = parse_arg_ul(argv[0], optarg, 14);
+ tcpipopts_mask |= 1 << 2;
+ break;
+ case 't':
+ ttl = parse_arg_ul(argv[0], optarg, UINT8_MAX);
+ tcpipopts_mask |= 1 << 3;
+ break;
+ case 'p':
+ *ports = optarg;
+ break;
+ case 's':
+ *single = true;
+ break;
+ case 'c':
+ *tc = true;
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+ if (optind < argc)
+ usage(argv[0]);
+
+ if (tcpipopts_mask == 0xf) {
+ if (mss4 == 0 || mss6 == 0 || wscale == 0 || ttl == 0)
+ usage(argv[0]);
+ *tcpipopts = (mss6 << 32) | (ttl << 24) | (wscale << 16) | mss4;
+ } else if (tcpipopts_mask != 0) {
+ usage(argv[0]);
+ }
+
+ if (*ifindex != 0 && *prog_id != 0)
+ usage(argv[0]);
+ if (*ifindex == 0 && *prog_id == 0)
+ usage(argv[0]);
+}
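+
+/* Layout of the packed tcpipopts word built above (inferred from the shifts
+ * in parse_options(), shown here for illustration):
+ *
+ *	bits  0-15: mss4   (IPv4 MSS)
+ *	bits 16-23: wscale (window scale, at most 14)
+ *	bits 24-31: ttl
+ *	bits 32-47: mss6   (IPv6 MSS)
+ *
+ * e.g. the TTL can be recovered as (tcpipopts >> 24) & 0xff.
+ */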
+
+static int syncookie_attach(const char *argv0, unsigned int ifindex, bool tc)
+{
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ char xdp_filename[PATH_MAX];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ int prog_fd;
+ int err;
+
+ snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv0);
+ obj = bpf_object__open_file(xdp_filename, NULL);
+ err = libbpf_get_error(obj);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_object__open_file: %s\n", strerror(-err));
+ return err;
+ }
+
+ err = bpf_object__load(obj);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_object__open_file: %s\n", strerror(-err));
+ return err;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, tc ? "syncookie_tc" : "syncookie_xdp");
+ if (!prog) {
+ fprintf(stderr, "Error: bpf_object__find_program_by_name: program was not found\n");
+ return -ENOENT;
+ }
+
+ prog_fd = bpf_program__fd(prog);
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
+ goto out;
+ }
+ attached_tc = tc;
+ attached_prog_id = info.id;
+ signal(SIGINT, cleanup);
+ signal(SIGTERM, cleanup);
+ if (tc) {
+ LIBBPF_OPTS(bpf_tc_hook, hook,
+ .ifindex = ifindex,
+ .attach_point = BPF_TC_INGRESS);
+ LIBBPF_OPTS(bpf_tc_opts, opts,
+ .handle = 1,
+ .priority = 1,
+ .prog_fd = prog_fd);
+
+ err = bpf_tc_hook_create(&hook);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_tc_hook_create: %s\n",
+ strerror(-err));
+ goto fail;
+ }
+ err = bpf_tc_attach(&hook, &opts);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_tc_attach: %s\n",
+ strerror(-err));
+ goto fail;
+ }
+
+ } else {
+ err = bpf_xdp_attach(ifindex, prog_fd,
+ XDP_FLAGS_UPDATE_IF_NOEXIST, NULL);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_set_link_xdp_fd: %s\n",
+ strerror(-err));
+ goto fail;
+ }
+ }
+ err = 0;
+out:
+ bpf_object__close(obj);
+ return err;
+fail:
+ signal(SIGINT, SIG_DFL);
+ signal(SIGTERM, SIG_DFL);
+ attached_prog_id = 0;
+ goto out;
+}
+
+static int syncookie_open_bpf_maps(__u32 prog_id, int *values_map_fd, int *ports_map_fd)
+{
+ struct bpf_prog_info prog_info;
+ __u32 map_ids[8];
+ __u32 info_len;
+ int prog_fd;
+ int err;
+ int i;
+
+ *values_map_fd = -1;
+ *ports_map_fd = -1;
+
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd < 0) {
+ fprintf(stderr, "Error: bpf_prog_get_fd_by_id: %s\n", strerror(-prog_fd));
+ return prog_fd;
+ }
+
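+ /* map_ids points to a caller-provided array; the kernel fills in up
+ * to nr_map_ids entries and updates nr_map_ids to the number of maps
+ * actually used by the program.
+ */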
+ prog_info = (struct bpf_prog_info) {
+ .nr_map_ids = 8,
+ .map_ids = (__u64)map_ids,
+ };
+ info_len = sizeof(prog_info);
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
+ if (err != 0) {
+ fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
+ goto out;
+ }
+
+ if (prog_info.nr_map_ids < 2) {
+ fprintf(stderr, "Error: Found %u BPF maps, expected at least 2\n",
+ prog_info.nr_map_ids);
+ err = -ENOENT;
+ goto out;
+ }
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ struct bpf_map_info map_info = {};
+ int map_fd;
+
+ err = bpf_map_get_fd_by_id(map_ids[i]);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_map_get_fd_by_id: %s\n", strerror(-err));
+ goto err_close_map_fds;
+ }
+ map_fd = err;
+
+ info_len = sizeof(map_info);
+ err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
+ if (err != 0) {
+ fprintf(stderr, "Error: bpf_obj_get_info_by_fd: %s\n", strerror(-err));
+ close(map_fd);
+ goto err_close_map_fds;
+ }
+ if (strcmp(map_info.name, "values") == 0) {
+ *values_map_fd = map_fd;
+ continue;
+ }
+ if (strcmp(map_info.name, "allowed_ports") == 0) {
+ *ports_map_fd = map_fd;
+ continue;
+ }
+ close(map_fd);
+ }
+
+ if (*values_map_fd != -1 && *ports_map_fd != -1) {
+ err = 0;
+ goto out;
+ }
+
+ err = -ENOENT;
+
+err_close_map_fds:
+ if (*values_map_fd != -1)
+ close(*values_map_fd);
+ if (*ports_map_fd != -1)
+ close(*ports_map_fd);
+ *values_map_fd = -1;
+ *ports_map_fd = -1;
+
+out:
+ close(prog_fd);
+ return err;
+}
+
+int main(int argc, char *argv[])
+{
+ int values_map_fd, ports_map_fd;
+ __u64 tcpipopts;
+ bool firstiter;
+ __u64 prevcnt;
+ __u32 prog_id;
+ char *ports;
+ bool single;
+ int err = 0;
+ bool tc;
+
+ parse_options(argc, argv, &ifindex, &prog_id, &tcpipopts, &ports,
+ &single, &tc);
+
+ if (prog_id == 0) {
+ if (!tc) {
+ err = bpf_xdp_query_id(ifindex, 0, &prog_id);
+ if (err < 0) {
+ fprintf(stderr, "Error: bpf_get_link_xdp_id: %s\n",
+ strerror(-err));
+ goto out;
+ }
+ }
+ if (prog_id == 0) {
+ err = syncookie_attach(argv[0], ifindex, tc);
+ if (err < 0)
+ goto out;
+ prog_id = attached_prog_id;
+ }
+ }
+
+ err = syncookie_open_bpf_maps(prog_id, &values_map_fd, &ports_map_fd);
+ if (err < 0)
+ goto out;
+
+ if (ports) {
+ __u16 port_last = 0;
+ __u32 port_idx = 0;
+ char *p = ports;
+
+ fprintf(stderr, "Replacing allowed ports\n");
+
+ while (p && *p != '\0') {
+ char *token = strsep(&p, ",");
+ __u16 port;
+
+ port = parse_arg_ul(argv[0], token, UINT16_MAX);
+ err = bpf_map_update_elem(ports_map_fd, &port_idx, &port, BPF_ANY);
+ if (err != 0) {
+ fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
+ fprintf(stderr, "Failed to add port %u (index %u)\n",
+ port, port_idx);
+ goto out_close_maps;
+ }
+ fprintf(stderr, "Added port %u\n", port);
+ port_idx++;
+ }
+ err = bpf_map_update_elem(ports_map_fd, &port_idx, &port_last, BPF_ANY);
+ if (err != 0) {
+ fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
+ fprintf(stderr, "Failed to add the terminator value 0 (index %u)\n",
+ port_idx);
+ goto out_close_maps;
+ }
+ }
+
+ if (tcpipopts) {
+ __u32 key = 0;
+
+ fprintf(stderr, "Replacing TCP/IP options\n");
+
+ err = bpf_map_update_elem(values_map_fd, &key, &tcpipopts, BPF_ANY);
+ if (err != 0) {
+ fprintf(stderr, "Error: bpf_map_update_elem: %s\n", strerror(-err));
+ goto out_close_maps;
+ }
+ }
+
+ if ((ports || tcpipopts) && attached_prog_id == 0 && !single)
+ goto out_close_maps;
+
+ prevcnt = 0;
+ firstiter = true;
+ while (true) {
+ __u32 key = 1;
+ __u64 value;
+
+ err = bpf_map_lookup_elem(values_map_fd, &key, &value);
+ if (err != 0) {
+ fprintf(stderr, "Error: bpf_map_lookup_elem: %s\n", strerror(-err));
+ goto out_close_maps;
+ }
+ if (firstiter) {
+ prevcnt = value;
+ firstiter = false;
+ }
+ if (single) {
+ printf("Total SYNACKs generated: %llu\n", value);
+ break;
+ }
+ printf("SYNACKs generated: %llu (total %llu)\n", value - prevcnt, value);
+ prevcnt = value;
+ sleep(1);
+ }
+
+out_close_maps:
+ close(values_map_fd);
+ close(ports_map_fd);
+out:
+ return err == 0 ? 0 : 1;
+}
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index c567856fd1bc..5b6f977870f8 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -12,7 +12,6 @@
#include <string.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
@@ -89,7 +88,6 @@ int main(int argc, char **argv)
{
__u32 mode_flags = XDP_FLAGS_DRV_MODE | XDP_FLAGS_SKB_MODE;
struct addrinfo *a, hints = { .ai_family = AF_INET };
- struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
__u16 count = XDPING_DEFAULT_COUNT;
struct pinginfo pinginfo = { 0 };
const char *optstr = "c:I:NsS";
@@ -167,10 +165,8 @@ int main(int argc, char **argv)
freeaddrinfo(a);
}
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
- perror("setrlimit(RLIMIT_MEMLOCK)");
- return 1;
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
diff --git a/tools/lib/bpf/xsk.c b/tools/testing/selftests/bpf/xsk.c
index af136f73b09d..f2721a4ae7c5 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/testing/selftests/bpf/xsk.c
@@ -30,16 +30,10 @@
#include <sys/types.h>
#include <linux/if_link.h>
-#include "bpf.h"
-#include "libbpf.h"
-#include "libbpf_internal.h"
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
#include "xsk.h"
-/* entire xsk.h and xsk.c is going away in libbpf 1.0, so ignore all internal
- * uses of deprecated APIs
- */
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
#ifndef SOL_XDP
#define SOL_XDP 283
#endif
@@ -52,6 +46,8 @@
#define PF_XDP AF_XDP
#endif
+#define pr_warn(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
+
enum xsk_prog {
XSK_PROG_FALLBACK,
XSK_PROG_REDIRECT_FLAGS,
@@ -286,11 +282,10 @@ out_mmap:
return err;
}
-DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
-int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
- __u64 size, struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *usr_config)
+int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area,
+ __u64 size, struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
{
struct xdp_umem_reg mr;
struct xsk_umem *umem;
@@ -351,25 +346,9 @@ struct xsk_umem_config_v1 {
__u32 frame_headroom;
};
-COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
-int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
- __u64 size, struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *usr_config)
-{
- struct xsk_umem_config config;
-
- memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
- config.flags = 0;
-
- return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
- &config);
-}
-
static enum xsk_prog get_xsk_prog(void)
{
enum xsk_prog detected = XSK_PROG_FALLBACK;
- __u32 size_out, retval, duration;
char data_in = 0, data_out;
struct bpf_insn insns[] = {
BPF_LD_MAP_FD(BPF_REG_1, 0),
@@ -378,6 +357,12 @@ static enum xsk_prog get_xsk_prog(void)
BPF_EMIT_CALL(BPF_FUNC_redirect_map),
BPF_EXIT_INSN(),
};
+ LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .data_in = &data_in,
+ .data_size_in = 1,
+ .data_out = &data_out,
+ );
+
int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);
map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL);
@@ -392,8 +377,8 @@ static enum xsk_prog get_xsk_prog(void)
return detected;
}
- ret = bpf_prog_test_run(prog_fd, 0, &data_in, 1, &data_out, &size_out, &retval, &duration);
- if (!ret && retval == XDP_PASS)
+ ret = bpf_prog_test_run_opts(prog_fd, &opts);
+ if (!ret && opts.retval == XDP_PASS)
detected = XSK_PROG_REDIRECT_FLAGS;
close(prog_fd);
close(map_fd);
@@ -510,7 +495,7 @@ static int xsk_create_bpf_link(struct xsk_socket *xsk)
int link_fd;
int err;
- err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
+ err = bpf_xdp_query_id(ctx->ifindex, xsk->config.xdp_flags, &prog_id);
if (err) {
pr_warn("getting XDP prog id failed\n");
return err;
@@ -536,6 +521,25 @@ static int xsk_create_bpf_link(struct xsk_socket *xsk)
return 0;
}
+/* Copy up to sz - 1 bytes from the zero-terminated src string and ensure
+ * that dst is a zero-terminated string no matter what (unless sz == 0, in
+ * which case the call is a no-op). It's conceptually close to FreeBSD's
+ * strlcpy() but differs in what is returned. Given this is an internal
+ * helper, it's trivial to extend when necessary. Use this instead of
+ * strncpy().
+ */
+static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
+{
+ size_t i;
+
+ if (sz == 0)
+ return;
+
+ sz--;
+ for (i = 0; i < sz && src[i]; i++)
+ dst[i] = src[i];
+ dst[i] = '\0';
+}
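+
+/* Usage sketch for the helper above (illustrative only):
+ *
+ *	char buf[4];
+ *
+ *	libbpf_strlcpy(buf, "abcdef", sizeof(buf));
+ *
+ * buf now holds "abc" plus the terminating NUL: at most sz - 1 bytes are
+ * copied and dst is always NUL-terminated for sz > 0.
+ */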
+
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
@@ -792,8 +796,8 @@ static int xsk_init_xdp_res(struct xsk_socket *xsk,
if (ctx->has_bpf_link)
err = xsk_create_bpf_link(xsk);
else
- err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, ctx->prog_fd,
- xsk->config.xdp_flags);
+ err = bpf_xdp_attach(xsk->ctx->ifindex, ctx->prog_fd,
+ xsk->config.xdp_flags, NULL);
if (err)
goto err_attach_xdp_prog;
@@ -811,7 +815,7 @@ err_set_bpf_maps:
if (ctx->has_bpf_link)
close(ctx->link_fd);
else
- bpf_set_link_xdp_fd(ctx->ifindex, -1, 0);
+ bpf_xdp_detach(ctx->ifindex, 0, NULL);
err_attach_xdp_prog:
close(ctx->prog_fd);
err_load_xdp_prog:
@@ -862,7 +866,7 @@ static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
if (ctx->has_bpf_link)
err = xsk_link_lookup(ctx->ifindex, &prog_id, &ctx->link_fd);
else
- err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id, xsk->config.xdp_flags);
+ err = bpf_xdp_query_id(ctx->ifindex, xsk->config.xdp_flags, &prog_id);
if (err)
return err;
@@ -876,6 +880,11 @@ static int __xsk_setup_xdp_prog(struct xsk_socket *_xdp, int *xsks_map_fd)
return err;
}
+int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd)
+{
+ return __xsk_setup_xdp_prog(xsk, xsks_map_fd);
+}
+
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
__u32 queue_id)
{
@@ -954,6 +963,7 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
ctx->fill = fill;
ctx->comp = comp;
list_add(&ctx->list, &umem->ctx_list);
+ ctx->has_bpf_link = xsk_probe_bpf_link();
return ctx;
}
@@ -1055,7 +1065,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
}
}
xsk->ctx = ctx;
- xsk->ctx->has_bpf_link = xsk_probe_bpf_link();
if (rx && !rx_setup_done) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
@@ -1147,8 +1156,6 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
goto out_mmap_tx;
}
- ctx->prog_fd = -1;
-
if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
err = __xsk_setup_xdp_prog(xsk, NULL);
if (err)
@@ -1229,7 +1236,10 @@ void xsk_socket__delete(struct xsk_socket *xsk)
ctx = xsk->ctx;
umem = ctx->umem;
- if (ctx->prog_fd != -1) {
+
+ xsk_put_ctx(ctx, true);
+
+ if (!ctx->refcount) {
xsk_delete_bpf_maps(xsk);
close(ctx->prog_fd);
if (ctx->has_bpf_link)
@@ -1248,8 +1258,6 @@ void xsk_socket__delete(struct xsk_socket *xsk)
}
}
- xsk_put_ctx(ctx, true);
-
umem->refcount--;
/* Do not close an fd that also has an associated umem connected
* to it.
diff --git a/tools/lib/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h
index 64e9c57fd792..997723b0bfb2 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/testing/selftests/bpf/xsk.h
@@ -9,15 +9,15 @@
* Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
*/
-#ifndef __LIBBPF_XSK_H
-#define __LIBBPF_XSK_H
+#ifndef __XSK_H
+#define __XSK_H
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>
-#include "libbpf.h"
+#include <bpf/libbpf.h>
#ifdef __cplusplus
extern "C" {
@@ -251,9 +251,7 @@ static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__fd(const struct xsk_umem *umem);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__fd(const struct xsk_socket *xsk);
#define XSK_RING_CONS__DEFAULT_NUM_DESCS 2048
@@ -271,9 +269,8 @@ struct xsk_umem_config {
__u32 flags;
};
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
+int xsk_setup_xdp_prog_xsk(struct xsk_socket *xsk, int *xsks_map_fd);
int xsk_setup_xdp_prog(int ifindex, int *xsks_map_fd);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__update_xskmap(struct xsk_socket *xsk, int xsks_map_fd);
/* Flags for the libbpf_flags field. */
@@ -288,32 +285,17 @@ struct xsk_socket_config {
};
/* Set config to NULL to get the default configuration. */
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__create(struct xsk_umem **umem,
void *umem_area, __u64 size,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
-int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
- void *umem_area, __u64 size,
- struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *config);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
-int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
- void *umem_area, __u64 size,
- struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *config);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__create(struct xsk_socket **xsk,
const char *ifname, __u32 queue_id,
struct xsk_umem *umem,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
const struct xsk_socket_config *config);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
const char *ifname,
__u32 queue_id, struct xsk_umem *umem,
@@ -324,13 +306,11 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
const struct xsk_socket_config *config);
/* Returns 0 for success and -EBUSY if the umem is still in use. */
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
int xsk_umem__delete(struct xsk_umem *umem);
-LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "AF_XDP support deprecated and moved to libxdp")
void xsk_socket__delete(struct xsk_socket *xsk);
#ifdef __cplusplus
} /* extern "C" */
#endif
-#endif /* __LIBBPF_XSK_H */
+#endif /* __XSK_H */
diff --git a/tools/testing/selftests/bpf/xsk_prereqs.sh b/tools/testing/selftests/bpf/xsk_prereqs.sh
index bf29d2549bee..a0b71723a818 100755
--- a/tools/testing/selftests/bpf/xsk_prereqs.sh
+++ b/tools/testing/selftests/bpf/xsk_prereqs.sh
@@ -8,15 +8,14 @@ ksft_xfail=2
ksft_xpass=3
ksft_skip=4
-SPECFILE=veth.spec
-XSKOBJ=xdpxceiver
+XSKOBJ=xskxceiver
validate_root_exec()
{
msg="skip all tests:"
if [ $UID != 0 ]; then
echo $msg must be run as root >&2
- test_exit $ksft_fail 2
+ test_exit $ksft_fail
else
return $ksft_pass
fi
@@ -27,39 +26,31 @@ validate_veth_support()
msg="skip all tests:"
if [ $(ip link add $1 type veth 2>/dev/null; echo $?;) != 0 ]; then
echo $msg veth kernel support not available >&2
- test_exit $ksft_skip 1
+ test_exit $ksft_skip
else
ip link del $1
return $ksft_pass
fi
}
-validate_veth_spec_file()
-{
- if [ ! -f ${SPECFILE} ]; then
- test_exit $ksft_skip 1
- fi
-}
-
test_status()
{
statusval=$1
- if [ $statusval -eq 2 ]; then
- echo -e "$2: [ FAIL ]"
- elif [ $statusval -eq 1 ]; then
- echo -e "$2: [ SKIPPED ]"
- elif [ $statusval -eq 0 ]; then
- echo -e "$2: [ PASS ]"
+ if [ $statusval -eq $ksft_fail ]; then
+ echo "$2: [ FAIL ]"
+ elif [ $statusval -eq $ksft_skip ]; then
+ echo "$2: [ SKIPPED ]"
+ elif [ $statusval -eq $ksft_pass ]; then
+ echo "$2: [ PASS ]"
fi
}
test_exit()
{
- retval=$1
- if [ $2 -ne 0 ]; then
- test_status $2 $(basename $0)
+ if [ $1 -ne 0 ]; then
+ test_status $1 $(basename $0)
fi
- exit $retval
+ exit 1
}
clear_configs()
@@ -74,9 +65,6 @@ clear_configs()
#veth node inside NS won't get removed so we explicitly remove it
[ $(ip link show $1 &>/dev/null; echo $?;) == 0 ] &&
{ ip link del $1; }
- if [ -f ${SPECFILE} ]; then
- rm -f ${SPECFILE}
- fi
}
cleanup_exit()
@@ -86,10 +74,19 @@ cleanup_exit()
validate_ip_utility()
{
- [ ! $(type -P ip) ] && { echo "'ip' not found. Skipping tests."; test_exit $ksft_skip 1; }
+ [ ! $(type -P ip) ] && { echo "'ip' not found. Skipping tests."; test_exit $ksft_skip; }
}
-execxdpxceiver()
+exec_xskxceiver()
{
- ./${XSKOBJ} -i ${VETH0} -i ${VETH1},${NS1} ${VERBOSE_ARG} ${DUMP_PKTS_ARG}
+ if [[ $busy_poll -eq 1 ]]; then
+ ARGS+="-b "
+ fi
+
+ ./${XSKOBJ} -i ${VETH0} -i ${VETH1},${NS1} ${ARGS}
+
+ retval=$?
+ test_status $retval "${TEST_NAME}"
+ statusList+=($retval)
+ nameList+=(${TEST_NAME})
}
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
index 5f8296d29e77..74d56d971baf 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xskxceiver.c
@@ -90,18 +90,19 @@
#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
-#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/time.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <time.h>
#include <unistd.h>
#include <stdatomic.h>
-#include <bpf/xsk.h>
-#include "xdpxceiver.h"
+#include "xsk.h"
+#include "xskxceiver.h"
#include "../kselftest.h"
/* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf.
- * Until xdpxceiver is either moved or re-writed into libxdp, suppress
+ * Until xskxceiver is either moved or rewritten into libxdp, suppress
* deprecation warnings in this file
*/
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
@@ -123,9 +124,17 @@ static void __exit_with_error(int error, const char *file, const char *func, int
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)
#define mode_string(test) (test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? "SKB" : "DRV"
+#define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : ""
-#define print_ksft_result(test) \
- (ksft_test_result_pass("PASS: %s %s\n", mode_string(test), (test)->name))
+static void report_failure(struct test_spec *test)
+{
+ if (test->fail)
+ return;
+
+ ksft_test_result_fail("FAIL: %s %s%s\n", mode_string(test), busy_poll_string(test),
+ test->name);
+ test->fail = true;
+}
static void memset32_htonl(void *dest, u32 val, u32 size)
{
@@ -265,6 +274,26 @@ static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size
return 0;
}
+static void enable_busy_poll(struct xsk_socket_info *xsk)
+{
+ int sock_opt;
+
+ sock_opt = 1;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ exit_with_error(errno);
+
+ sock_opt = 20;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ exit_with_error(errno);
+
+ sock_opt = BATCH_SIZE;
+ if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
+ (void *)&sock_opt, sizeof(sock_opt)) < 0)
+ exit_with_error(errno);
+}
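+
+/* The three setsockopt() calls above opt the socket into busy polling:
+ * SO_PREFER_BUSY_POLL asks the kernel to favor busy polling over interrupt
+ * driven processing, SO_BUSY_POLL sets the busy-poll timeout in
+ * microseconds, and SO_BUSY_POLL_BUDGET caps how many packets one busy-poll
+ * iteration may process. A value can be read back for verification, e.g.
+ * getsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &val, &len).
+ */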
+
static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem,
struct ifobject *ifobject, bool shared)
{
@@ -288,8 +317,8 @@ static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_inf
static struct option long_options[] = {
{"interface", required_argument, 0, 'i'},
- {"queue", optional_argument, 0, 'q'},
- {"dump-pkts", optional_argument, 0, 'D'},
+ {"busy-poll", no_argument, 0, 'b'},
+ {"dump-pkts", no_argument, 0, 'D'},
{"verbose", no_argument, 0, 'v'},
{0, 0, 0, 0}
};
@@ -300,9 +329,9 @@ static void usage(const char *prog)
" Usage: %s [OPTIONS]\n"
" Options:\n"
" -i, --interface Use interface\n"
- " -q, --queue=n Use queue n (default 0)\n"
" -D, --dump-pkts Dump packets L2 - L5\n"
- " -v, --verbose Verbose output\n";
+ " -v, --verbose Verbose output\n"
+ " -b, --busy-poll Enable busy poll\n";
ksft_print_msg(str, prog);
}
@@ -348,7 +377,7 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
for (;;) {
char *sptr, *token;
- c = getopt_long(argc, argv, "i:Dv", long_options, &option_index);
+ c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index);
if (c == -1)
break;
@@ -374,6 +403,10 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj
case 'v':
opt_verbose = true;
break;
+ case 'b':
+ ifobj_tx->busy_poll = true;
+ ifobj_rx->busy_poll = true;
+ break;
default:
usage(basename(argv[0]));
ksft_exit_xfail();
@@ -391,8 +424,10 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
ifobj->xsk = &ifobj->xsk_arr[0];
ifobj->use_poll = false;
- ifobj->pacing_on = true;
+ ifobj->use_fill_ring = true;
+ ifobj->release_rx = true;
ifobj->pkt_stream = test->pkt_stream_default;
+ ifobj->validation_func = NULL;
if (i == 0) {
ifobj->rx_on = false;
@@ -417,6 +452,7 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
test->current_step = 0;
test->total_steps = 1;
test->nb_sockets = 1;
+ test->fail = false;
}
static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx,
@@ -468,9 +504,10 @@ static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb)
return &pkt_stream->pkts[pkt_nb];
}
-static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream)
+static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent)
{
while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) {
+ (*pkts_sent)++;
if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid)
return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++];
pkt_stream->rx_pkt_nb++;
@@ -486,10 +523,16 @@ static void pkt_stream_delete(struct pkt_stream *pkt_stream)
static void pkt_stream_restore_default(struct test_spec *test)
{
- if (test->ifobj_tx->pkt_stream != test->pkt_stream_default) {
+ struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream;
+
+ if (tx_pkt_stream != test->pkt_stream_default) {
pkt_stream_delete(test->ifobj_tx->pkt_stream);
test->ifobj_tx->pkt_stream = test->pkt_stream_default;
}
+
+ if (test->ifobj_rx->pkt_stream != test->pkt_stream_default &&
+ test->ifobj_rx->pkt_stream != tx_pkt_stream)
+ pkt_stream_delete(test->ifobj_rx->pkt_stream);
test->ifobj_rx->pkt_stream = test->pkt_stream_default;
}
@@ -511,6 +554,16 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts)
return pkt_stream;
}
+static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len)
+{
+ pkt->addr = addr;
+ pkt->len = len;
+ if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom)
+ pkt->valid = false;
+ else
+ pkt->valid = true;
+}
+
static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len)
{
struct pkt_stream *pkt_stream;
@@ -522,14 +575,9 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
pkt_stream->nb_pkts = nb_pkts;
for (i = 0; i < nb_pkts; i++) {
- pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size;
- pkt_stream->pkts[i].len = pkt_len;
+ pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
+ pkt_len);
pkt_stream->pkts[i].payload = i;
-
- if (pkt_len > umem->frame_size)
- pkt_stream->pkts[i].valid = false;
- else
- pkt_stream->pkts[i].valid = true;
}
return pkt_stream;
@@ -557,15 +605,27 @@ static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int off
u32 i;
pkt_stream = pkt_stream_clone(umem, test->pkt_stream_default);
- for (i = 1; i < test->pkt_stream_default->nb_pkts; i += 2) {
- pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size + offset;
- pkt_stream->pkts[i].len = pkt_len;
- }
+ for (i = 1; i < test->pkt_stream_default->nb_pkts; i += 2)
+ pkt_set(umem, &pkt_stream->pkts[i],
+ (i % umem->num_frames) * umem->frame_size + offset, pkt_len);
test->ifobj_tx->pkt_stream = pkt_stream;
test->ifobj_rx->pkt_stream = pkt_stream;
}
+static void pkt_stream_receive_half(struct test_spec *test)
+{
+ struct xsk_umem_info *umem = test->ifobj_rx->umem;
+ struct pkt_stream *pkt_stream = test->ifobj_tx->pkt_stream;
+ u32 i;
+
+ test->ifobj_rx->pkt_stream = pkt_stream_generate(umem, pkt_stream->nb_pkts,
+ pkt_stream->pkts[0].len);
+ pkt_stream = test->ifobj_rx->pkt_stream;
+ for (i = 1; i < pkt_stream->nb_pkts; i += 2)
+ pkt_stream->pkts[i].valid = false;
+}
+
static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
{
struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb);
@@ -576,7 +636,7 @@ static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb)
if (!pkt)
return NULL;
- if (!pkt->valid || pkt->len < PKT_SIZE)
+ if (!pkt->valid || pkt->len < MIN_PKT_SIZE)
return pkt;
data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr);
@@ -663,8 +723,7 @@ static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt
if (offset == expected_offset)
return true;
- ksft_test_result_fail("ERROR: [%s] expected [%u], got [%u]\n", __func__, expected_offset,
- offset);
+ ksft_print_msg("[%s] expected [%u], got [%u]\n", __func__, expected_offset, offset);
return false;
}
@@ -674,19 +733,18 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr));
if (!pkt) {
- ksft_test_result_fail("ERROR: [%s] too many packets received\n", __func__);
+ ksft_print_msg("[%s] too many packets received\n", __func__);
return false;
}
- if (len < PKT_SIZE) {
- /*Do not try to verify packets that are smaller than minimum size. */
+ if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) {
+ /* Do not try to verify packets that are smaller than minimum size. */
return true;
}
if (pkt->len != len) {
- ksft_test_result_fail
- ("ERROR: [%s] expected length [%d], got length [%d]\n",
- __func__, pkt->len, len);
+ ksft_print_msg("[%s] expected length [%d], got length [%d]\n",
+ __func__, pkt->len, len);
return false;
}
@@ -697,9 +755,8 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len)
pkt_dump(data, PKT_SIZE);
if (pkt->payload != seqnum) {
- ksft_test_result_fail
- ("ERROR: [%s] expected seqnum [%d], got seqnum [%d]\n",
- __func__, pkt->payload, seqnum);
+ ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n",
+ __func__, pkt->payload, seqnum);
return false;
}
} else {
@@ -717,12 +774,25 @@ static void kick_tx(struct xsk_socket_info *xsk)
int ret;
ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
- if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN)
+ if (ret >= 0)
+ return;
+ if (errno == ENOBUFS || errno == EAGAIN || errno == EBUSY || errno == ENETDOWN) {
+ usleep(100);
return;
+ }
exit_with_error(errno);
}
-static void complete_pkts(struct xsk_socket_info *xsk, int batch_size)
+static void kick_rx(struct xsk_socket_info *xsk)
+{
+ int ret;
+
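+ /* A zero-length non-blocking recvfrom() kicks the kernel Rx path;
+ * this is needed e.g. in busy-poll mode, where userspace drives
+ * packet processing.
+ */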
+ ret = recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
+ if (ret < 0)
+ exit_with_error(errno);
+}
+
+static int complete_pkts(struct xsk_socket_info *xsk, int batch_size)
{
unsigned int rcvd;
u32 idx;
@@ -735,26 +805,45 @@ static void complete_pkts(struct xsk_socket_info *xsk, int batch_size)
if (rcvd > xsk->outstanding_tx) {
u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1);
- ksft_test_result_fail("ERROR: [%s] Too many packets completed\n",
- __func__);
+ ksft_print_msg("[%s] Too many packets completed\n", __func__);
ksft_print_msg("Last completion address: %llx\n", addr);
- return;
+ return TEST_FAILURE;
}
xsk_ring_cons__release(&xsk->umem->cq, rcvd);
xsk->outstanding_tx -= rcvd;
}
+
+ return TEST_PASS;
}
-static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *xsk,
- struct pollfd *fds)
+static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds)
{
- struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
+ struct timeval tv_end, tv_now, tv_timeout = {RECV_TMOUT, 0};
+ u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0;
+ struct pkt_stream *pkt_stream = ifobj->pkt_stream;
+ struct xsk_socket_info *xsk = ifobj->xsk;
struct xsk_umem_info *umem = xsk->umem;
- u32 idx_rx = 0, idx_fq = 0, rcvd, i;
+ struct pkt *pkt;
int ret;
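+ /* Compute an absolute deadline RECV_TMOUT seconds from now; the
+ * receive loop below gives up once it passes.
+ */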
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+ timeradd(&tv_now, &tv_timeout, &tv_end);
+
+ pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
while (pkt) {
+ ret = gettimeofday(&tv_now, NULL);
+ if (ret)
+ exit_with_error(errno);
+ if (timercmp(&tv_now, &tv_end, >)) {
+ ksft_print_msg("ERROR: [%s] Receive loop timed out\n", __func__);
+ return TEST_FAILURE;
+ }
+
+ kick_rx(xsk);
+
rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
if (!rcvd) {
if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
@@ -765,54 +854,53 @@ static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *
continue;
}
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
- while (ret != rcvd) {
- if (ret < 0)
- exit_with_error(-ret);
- if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
- ret = poll(fds, 1, POLL_TMOUT);
+ if (ifobj->use_fill_ring) {
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ while (ret != rcvd) {
if (ret < 0)
exit_with_error(-ret);
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ ret = poll(fds, 1, POLL_TMOUT);
+ if (ret < 0)
+ exit_with_error(-ret);
+ }
+ ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
- ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
for (i = 0; i < rcvd; i++) {
const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
u64 addr = desc->addr, orig;
- if (!pkt) {
- ksft_test_result_fail("ERROR: [%s] Received too many packets.\n",
- __func__);
- ksft_print_msg("Last packet has addr: %llx len: %u\n",
- addr, desc->len);
- return;
- }
-
orig = xsk_umem__extract_addr(addr);
addr = xsk_umem__add_offset_to_addr(addr);
- if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len))
- return;
- if (!is_offset_correct(umem, pkt_stream, addr, pkt->addr))
- return;
+ if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) ||
+ !is_offset_correct(umem, pkt_stream, addr, pkt->addr))
+ return TEST_FAILURE;
- *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
- pkt = pkt_stream_get_next_rx_pkt(pkt_stream);
+ if (ifobj->use_fill_ring)
+ *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = orig;
+ pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &pkts_sent);
}
- xsk_ring_prod__submit(&umem->fq, rcvd);
- xsk_ring_cons__release(&xsk->rx, rcvd);
+ if (ifobj->use_fill_ring)
+ xsk_ring_prod__submit(&umem->fq, rcvd);
+ if (ifobj->release_rx)
+ xsk_ring_cons__release(&xsk->rx, rcvd);
pthread_mutex_lock(&pacing_mutex);
- pkts_in_flight -= rcvd;
+ pkts_in_flight -= pkts_sent;
if (pkts_in_flight < umem->num_frames)
pthread_cond_signal(&pacing_cond);
pthread_mutex_unlock(&pacing_mutex);
+ pkts_sent = 0;
}
+
+ return TEST_PASS;
}
-static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb)
+static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb)
{
struct xsk_socket_info *xsk = ifobject->xsk;
u32 i, idx, valid_pkts = 0;
@@ -822,21 +910,22 @@ static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb)
for (i = 0; i < BATCH_SIZE; i++) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i);
- struct pkt *pkt = pkt_generate(ifobject, pkt_nb);
+ struct pkt *pkt = pkt_generate(ifobject, *pkt_nb);
if (!pkt)
break;
tx_desc->addr = pkt->addr;
tx_desc->len = pkt->len;
- pkt_nb++;
+ (*pkt_nb)++;
if (pkt->valid)
valid_pkts++;
}
pthread_mutex_lock(&pacing_mutex);
pkts_in_flight += valid_pkts;
- if (ifobject->pacing_on && pkts_in_flight >= ifobject->umem->num_frames - BATCH_SIZE) {
+ /* pkts_in_flight might be negative if many invalid packets are sent */
+ if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) {
kick_tx(xsk);
pthread_cond_wait(&pacing_cond, &pacing_mutex);
}
@@ -844,10 +933,11 @@ static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb)
xsk_ring_prod__submit(&xsk->tx, i);
xsk->outstanding_tx += valid_pkts;
- complete_pkts(xsk, i);
+ if (complete_pkts(xsk, i))
+ return TEST_FAILURE;
usleep(10);
- return i;
+ return TEST_PASS;
}
static void wait_for_tx_completion(struct xsk_socket_info *xsk)
@@ -856,7 +946,7 @@ static void wait_for_tx_completion(struct xsk_socket_info *xsk)
complete_pkts(xsk, BATCH_SIZE);
}
-static void send_pkts(struct ifobject *ifobject)
+static int send_pkts(struct test_spec *test, struct ifobject *ifobject)
{
struct pollfd fds = { };
u32 pkt_cnt = 0;
@@ -865,6 +955,8 @@ static void send_pkts(struct ifobject *ifobject)
fds.events = POLLOUT;
while (pkt_cnt < ifobject->pkt_stream->nb_pkts) {
+ int err;
+
if (ifobject->use_poll) {
int ret;
@@ -876,58 +968,95 @@ static void send_pkts(struct ifobject *ifobject)
continue;
}
- pkt_cnt += __send_pkts(ifobject, pkt_cnt);
+ err = __send_pkts(ifobject, &pkt_cnt);
+ if (err || test->fail)
+ return TEST_FAILURE;
}
wait_for_tx_completion(ifobject->xsk);
+ return TEST_PASS;
}
-static bool rx_stats_are_valid(struct ifobject *ifobject)
+static int get_xsk_stats(struct xsk_socket *xsk, struct xdp_statistics *stats)
+{
+ int fd = xsk_socket__fd(xsk), err;
+ socklen_t optlen, expected_len;
+
+ optlen = sizeof(*stats);
+ err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
+ if (err) {
+ ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
+ __func__, -err, strerror(-err));
+ return TEST_FAILURE;
+ }
+
+ expected_len = sizeof(struct xdp_statistics);
+ if (optlen != expected_len) {
+ ksft_print_msg("[%s] getsockopt optlen error. Expected: %u got: %u\n",
+ __func__, expected_len, optlen);
+ return TEST_FAILURE;
+ }
+
+ return TEST_PASS;
+}
+
+static int validate_rx_dropped(struct ifobject *ifobject)
{
- u32 xsk_stat = 0, expected_stat = ifobject->pkt_stream->nb_pkts;
struct xsk_socket *xsk = ifobject->xsk->xsk;
- int fd = xsk_socket__fd(xsk);
struct xdp_statistics stats;
- socklen_t optlen;
int err;
- optlen = sizeof(stats);
- err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
- if (err) {
- ksft_test_result_fail("ERROR Rx: [%s] getsockopt(XDP_STATISTICS) error %u %s\n",
- __func__, -err, strerror(-err));
- return true;
- }
+ kick_rx(ifobject->xsk);
- if (optlen == sizeof(struct xdp_statistics)) {
- switch (stat_test_type) {
- case STAT_TEST_RX_DROPPED:
- xsk_stat = stats.rx_dropped;
- break;
- case STAT_TEST_TX_INVALID:
- return true;
- case STAT_TEST_RX_FULL:
- xsk_stat = stats.rx_ring_full;
- if (ifobject->umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS)
- expected_stat = ifobject->umem->num_frames - RX_FULL_RXQSIZE;
- else
- expected_stat = XSK_RING_PROD__DEFAULT_NUM_DESCS - RX_FULL_RXQSIZE;
- break;
- case STAT_TEST_RX_FILL_EMPTY:
- xsk_stat = stats.rx_fill_ring_empty_descs;
- break;
- default:
- break;
- }
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
- if (xsk_stat == expected_stat)
- return true;
- }
+ if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2)
+ return TEST_PASS;
- return false;
+ return TEST_FAILURE;
}
-static void tx_stats_validate(struct ifobject *ifobject)
+static int validate_rx_full(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ usleep(1000);
+ kick_rx(ifobject->xsk);
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ if (stats.rx_ring_full)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_fill_empty(struct ifobject *ifobject)
+{
+ struct xsk_socket *xsk = ifobject->xsk->xsk;
+ struct xdp_statistics stats;
+ int err;
+
+ usleep(1000);
+ kick_rx(ifobject->xsk);
+
+ err = get_xsk_stats(xsk, &stats);
+ if (err)
+ return TEST_FAILURE;
+
+ if (stats.rx_fill_ring_empty_descs)
+ return TEST_PASS;
+
+ return TEST_FAILURE;
+}
+
+static int validate_tx_invalid_descs(struct ifobject *ifobject)
{
struct xsk_socket *xsk = ifobject->xsk->xsk;
int fd = xsk_socket__fd(xsk);
@@ -938,22 +1067,25 @@ static void tx_stats_validate(struct ifobject *ifobject)
optlen = sizeof(stats);
err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
if (err) {
- ksft_test_result_fail("ERROR Tx: [%s] getsockopt(XDP_STATISTICS) error %u %s\n",
- __func__, -err, strerror(-err));
- return;
+ ksft_print_msg("[%s] getsockopt(XDP_STATISTICS) error %u %s\n",
+ __func__, -err, strerror(-err));
+ return TEST_FAILURE;
}
- if (stats.tx_invalid_descs == ifobject->pkt_stream->nb_pkts)
- return;
+ if (stats.tx_invalid_descs != ifobject->pkt_stream->nb_pkts / 2) {
+ ksft_print_msg("[%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
+ __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts / 2);
+ return TEST_FAILURE;
+ }
- ksft_test_result_fail("ERROR: [%s] tx_invalid_descs incorrect. Got [%u] expected [%u]\n",
- __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts);
+ return TEST_PASS;
}
static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
{
u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size;
int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
int ret, ifindex;
void *bufs;
u32 i;
@@ -985,6 +1117,9 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
exit_with_error(-ret);
usleep(USLEEP_MAX);
}
+
+ if (ifobject->busy_poll)
+ enable_busy_poll(&ifobject->xsk_arr[i]);
}
ifobject->xsk = &ifobject->xsk_arr[0];
@@ -996,10 +1131,26 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
if (!ifindex)
exit_with_error(errno);
- ret = xsk_setup_xdp_prog(ifindex, &ifobject->xsk_map_fd);
+ ret = xsk_setup_xdp_prog_xsk(ifobject->xsk->xsk, &ifobject->xsk_map_fd);
if (ret)
exit_with_error(-ret);
+ ret = bpf_xdp_query(ifindex, ifobject->xdp_flags, &opts);
+ if (ret)
+ exit_with_error(-ret);
+
+ if (ifobject->xdp_flags & XDP_FLAGS_SKB_MODE) {
+ if (opts.attach_mode != XDP_ATTACHED_SKB) {
+ ksft_print_msg("ERROR: [%s] XDP prog not in SKB mode\n");
+ exit_with_error(-EINVAL);
+ }
+ } else if (ifobject->xdp_flags & XDP_FLAGS_DRV_MODE) {
+ if (opts.attach_mode != XDP_ATTACHED_DRV) {
+ ksft_print_msg("ERROR: [%s] XDP prog not in DRV mode\n");
+ exit_with_error(-EINVAL);
+ }
+ }
+
ret = xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
if (ret)
exit_with_error(-ret);
@@ -1017,18 +1168,21 @@ static void *worker_testapp_validate_tx(void *arg)
{
struct test_spec *test = (struct test_spec *)arg;
struct ifobject *ifobject = test->ifobj_tx;
+ int err;
if (test->current_step == 1)
thread_common_ops(test, ifobject);
print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts,
ifobject->ifname);
- send_pkts(ifobject);
+ err = send_pkts(test, ifobject);
- if (stat_test_type == STAT_TEST_TX_INVALID)
- tx_stats_validate(ifobject);
+ if (!err && ifobject->validation_func)
+ err = ifobject->validation_func(ifobject);
+ if (err)
+ report_failure(test);
- if (test->total_steps == test->current_step)
+ if (test->total_steps == test->current_step || err)
testapp_cleanup_xsk_res(ifobject);
pthread_exit(NULL);
}
@@ -1069,6 +1223,7 @@ static void *worker_testapp_validate_rx(void *arg)
struct test_spec *test = (struct test_spec *)arg;
struct ifobject *ifobject = test->ifobj_rx;
struct pollfd fds = { };
+ int err;
if (test->current_step == 1)
thread_common_ops(test, ifobject);
@@ -1080,18 +1235,23 @@ static void *worker_testapp_validate_rx(void *arg)
pthread_barrier_wait(&barr);
- if (test_type == TEST_TYPE_STATS)
- while (!rx_stats_are_valid(ifobject))
- continue;
- else
- receive_pkts(ifobject->pkt_stream, ifobject->xsk, &fds);
+ err = receive_pkts(ifobject, &fds);
+
+ if (!err && ifobject->validation_func)
+ err = ifobject->validation_func(ifobject);
+ if (err) {
+ report_failure(test);
+ pthread_mutex_lock(&pacing_mutex);
+ pthread_cond_signal(&pacing_cond);
+ pthread_mutex_unlock(&pacing_mutex);
+ }
- if (test->total_steps == test->current_step)
+ if (test->total_steps == test->current_step || err)
testapp_cleanup_xsk_res(ifobject);
pthread_exit(NULL);
}
-static void testapp_validate_traffic(struct test_spec *test)
+static int testapp_validate_traffic(struct test_spec *test)
{
struct ifobject *ifobj_tx = test->ifobj_tx;
struct ifobject *ifobj_rx = test->ifobj_rx;
@@ -1116,6 +1276,8 @@ static void testapp_validate_traffic(struct test_spec *test)
pthread_join(t1, NULL);
pthread_join(t0, NULL);
+
+ return !!test->fail;
}
static void testapp_teardown(struct test_spec *test)
@@ -1124,7 +1286,8 @@ static void testapp_teardown(struct test_spec *test)
test_spec_set_name(test, "TEARDOWN");
for (i = 0; i < MAX_TEARDOWN_ITER; i++) {
- testapp_validate_traffic(test);
+ if (testapp_validate_traffic(test))
+ return;
test_spec_reset(test);
}
}
@@ -1147,7 +1310,8 @@ static void testapp_bidi(struct test_spec *test)
test->ifobj_tx->rx_on = true;
test->ifobj_rx->tx_on = true;
test->total_steps = 2;
- testapp_validate_traffic(test);
+ if (testapp_validate_traffic(test))
+ return;
print_verbose("Switching Tx/Rx vectors\n");
swap_directions(&test->ifobj_rx, &test->ifobj_tx);
@@ -1175,7 +1339,8 @@ static void testapp_bpf_res(struct test_spec *test)
test_spec_set_name(test, "BPF_RES");
test->total_steps = 2;
test->nb_sockets = 2;
- testapp_validate_traffic(test);
+ if (testapp_validate_traffic(test))
+ return;
swap_xsk_resources(test->ifobj_tx, test->ifobj_rx);
testapp_validate_traffic(test);
@@ -1188,53 +1353,58 @@ static void testapp_headroom(struct test_spec *test)
testapp_validate_traffic(test);
}
-static void testapp_stats(struct test_spec *test)
+static void testapp_stats_rx_dropped(struct test_spec *test)
{
- int i;
+ test_spec_set_name(test, "STAT_RX_DROPPED");
+ test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
+ XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3;
+ pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0);
+ pkt_stream_receive_half(test);
+ test->ifobj_rx->validation_func = validate_rx_dropped;
+ testapp_validate_traffic(test);
+}
- for (i = 0; i < STAT_TEST_TYPE_MAX; i++) {
- test_spec_reset(test);
- stat_test_type = i;
- /* No or few packets will be received so cannot pace packets */
- test->ifobj_tx->pacing_on = false;
-
- switch (stat_test_type) {
- case STAT_TEST_RX_DROPPED:
- test_spec_set_name(test, "STAT_RX_DROPPED");
- test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size -
- XDP_PACKET_HEADROOM - 1;
- testapp_validate_traffic(test);
- break;
- case STAT_TEST_RX_FULL:
- test_spec_set_name(test, "STAT_RX_FULL");
- test->ifobj_rx->xsk->rxqsize = RX_FULL_RXQSIZE;
- testapp_validate_traffic(test);
- break;
- case STAT_TEST_TX_INVALID:
- test_spec_set_name(test, "STAT_TX_INVALID");
- pkt_stream_replace(test, DEFAULT_PKT_CNT, XSK_UMEM__INVALID_FRAME_SIZE);
- testapp_validate_traffic(test);
+static void testapp_stats_tx_invalid_descs(struct test_spec *test)
+{
+ test_spec_set_name(test, "STAT_TX_INVALID");
+ pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0);
+ test->ifobj_tx->validation_func = validate_tx_invalid_descs;
+ testapp_validate_traffic(test);
- pkt_stream_restore_default(test);
- break;
- case STAT_TEST_RX_FILL_EMPTY:
- test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
- test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 0,
- MIN_PKT_SIZE);
- if (!test->ifobj_rx->pkt_stream)
- exit_with_error(ENOMEM);
- test->ifobj_rx->pkt_stream->use_addr_for_fill = true;
- testapp_validate_traffic(test);
-
- pkt_stream_restore_default(test);
- break;
- default:
- break;
- }
- }
+ pkt_stream_restore_default(test);
+}
- /* To only see the whole stat set being completed unless an individual test fails. */
- test_spec_set_name(test, "STATS");
+static void testapp_stats_rx_full(struct test_spec *test)
+{
+ test_spec_set_name(test, "STAT_RX_FULL");
+ pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
+ test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
+ DEFAULT_UMEM_BUFFERS, PKT_SIZE);
+ if (!test->ifobj_rx->pkt_stream)
+ exit_with_error(ENOMEM);
+
+ test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS;
+ test->ifobj_rx->release_rx = false;
+ test->ifobj_rx->validation_func = validate_rx_full;
+ testapp_validate_traffic(test);
+
+ pkt_stream_restore_default(test);
+}
+
+static void testapp_stats_fill_empty(struct test_spec *test)
+{
+ test_spec_set_name(test, "STAT_RX_FILL_EMPTY");
+ pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE);
+ test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem,
+ DEFAULT_UMEM_BUFFERS, PKT_SIZE);
+ if (!test->ifobj_rx->pkt_stream)
+ exit_with_error(ENOMEM);
+
+ test->ifobj_rx->use_fill_ring = false;
+ test->ifobj_rx->validation_func = validate_fill_empty;
+ testapp_validate_traffic(test);
+
+ pkt_stream_restore_default(test);
}
/* Simple test */
@@ -1283,10 +1453,10 @@ static void testapp_single_pkt(struct test_spec *test)
static void testapp_invalid_desc(struct test_spec *test)
{
struct pkt pkts[] = {
- /* Zero packet length at address zero allowed */
- {0, 0, 0, true},
- /* Zero packet length allowed */
- {0x1000, 0, 0, true},
+ /* Zero packet address allowed */
+ {0, PKT_SIZE, 0, true},
+ /* Allowed packet */
+ {0x1000, PKT_SIZE, 0, true},
/* Straddling the start of umem */
{-2, PKT_SIZE, 0, false},
/* Packet too large */
@@ -1339,14 +1509,18 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *
static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type)
{
- test_type = type;
-
- /* reset defaults after potential previous test */
- stat_test_type = -1;
-
- switch (test_type) {
- case TEST_TYPE_STATS:
- testapp_stats(test);
+ switch (type) {
+ case TEST_TYPE_STATS_RX_DROPPED:
+ testapp_stats_rx_dropped(test);
+ break;
+ case TEST_TYPE_STATS_TX_INVALID_DESCS:
+ testapp_stats_tx_invalid_descs(test);
+ break;
+ case TEST_TYPE_STATS_RX_FULL:
+ testapp_stats_rx_full(test);
+ break;
+ case TEST_TYPE_STATS_FILL_EMPTY:
+ testapp_stats_fill_empty(test);
break;
case TEST_TYPE_TEARDOWN:
testapp_teardown(test);
@@ -1369,7 +1543,7 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE");
test->ifobj_tx->umem->frame_size = 2048;
test->ifobj_rx->umem->frame_size = 2048;
- pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE);
+ pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE);
testapp_validate_traffic(test);
pkt_stream_restore_default(test);
@@ -1411,7 +1585,9 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_
break;
}
- print_ksft_result(test);
+ if (!test->fail)
+ ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test),
+ test->name);
}
static struct ifobject *ifobject_create(void)
@@ -1448,14 +1624,13 @@ static void ifobject_delete(struct ifobject *ifobj)
int main(int argc, char **argv)
{
- struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY };
struct pkt_stream *pkt_stream_default;
struct ifobject *ifobj_tx, *ifobj_rx;
+ u32 i, j, failed_tests = 0;
struct test_spec test;
- u32 i, j;
- if (setrlimit(RLIMIT_MEMLOCK, &_rlim))
- exit_with_error(errno);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
ifobj_tx = ifobject_create();
if (!ifobj_tx)
@@ -1491,12 +1666,17 @@ int main(int argc, char **argv)
test_spec_init(&test, ifobj_tx, ifobj_rx, i);
run_pkt_test(&test, i, j);
usleep(USLEEP_MAX);
+
+ if (test.fail)
+ failed_tests++;
}
pkt_stream_delete(pkt_stream_default);
ifobject_delete(ifobj_tx);
ifobject_delete(ifobj_rx);
- ksft_exit_pass();
- return 0;
+ if (failed_tests)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/bpf/xdpxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 62a3e6388632..3d17053f98e5 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -2,8 +2,8 @@
* Copyright(c) 2020 Intel Corporation.
*/
-#ifndef XDPXCEIVER_H_
-#define XDPXCEIVER_H_
+#ifndef XSKXCEIVER_H_
+#define XSKXCEIVER_H_
#ifndef SOL_XDP
#define SOL_XDP 283
@@ -17,6 +17,16 @@
#define PF_XDP AF_XDP
#endif
+#ifndef SO_BUSY_POLL_BUDGET
+#define SO_BUSY_POLL_BUDGET 70
+#endif
+
+#ifndef SO_PREFER_BUSY_POLL
+#define SO_PREFER_BUSY_POLL 69
+#endif
+
+#define TEST_PASS 0
+#define TEST_FAILURE -1
#define MAX_INTERFACES 2
#define MAX_INTERFACE_NAME_CHARS 7
#define MAX_INTERFACES_NAMESPACE_CHARS 10
@@ -25,9 +35,10 @@
#define MAX_TEARDOWN_ITER 10
#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
sizeof(struct udphdr))
-#define MIN_PKT_SIZE 64
+#define MIN_ETH_PKT_SIZE 64
#define ETH_FCS_SIZE 4
-#define PKT_SIZE (MIN_PKT_SIZE - ETH_FCS_SIZE)
+#define MIN_PKT_SIZE (MIN_ETH_PKT_SIZE - ETH_FCS_SIZE)
+#define PKT_SIZE (MIN_PKT_SIZE)
#define IP_PKT_SIZE (PKT_SIZE - sizeof(struct ethhdr))
#define IP_PKT_VER 0x4
#define IP_PKT_TOS 0x9
@@ -37,6 +48,7 @@
#define SOCK_RECONF_CTR 10
#define BATCH_SIZE 64
#define POLL_TMOUT 1000
+#define RECV_TMOUT 3
#define DEFAULT_PKT_CNT (4 * 1024)
#define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
@@ -64,24 +76,16 @@ enum test_type {
TEST_TYPE_HEADROOM,
TEST_TYPE_TEARDOWN,
TEST_TYPE_BIDI,
- TEST_TYPE_STATS,
+ TEST_TYPE_STATS_RX_DROPPED,
+ TEST_TYPE_STATS_TX_INVALID_DESCS,
+ TEST_TYPE_STATS_RX_FULL,
+ TEST_TYPE_STATS_FILL_EMPTY,
TEST_TYPE_BPF_RES,
TEST_TYPE_MAX
};
-enum stat_test_type {
- STAT_TEST_RX_DROPPED,
- STAT_TEST_TX_INVALID,
- STAT_TEST_RX_FULL,
- STAT_TEST_RX_FILL_EMPTY,
- STAT_TEST_TYPE_MAX
-};
-
static bool opt_pkt_dump;
-static int test_type;
-
static bool opt_verbose;
-static int stat_test_type;
struct xsk_umem_info {
struct xsk_ring_prod fq;
@@ -117,6 +121,8 @@ struct pkt_stream {
bool use_addr_for_fill;
};
+struct ifobject;
+typedef int (*validation_func_t)(struct ifobject *ifobj);
typedef void *(*thread_func_t)(void *arg);
struct ifobject {
@@ -126,6 +132,7 @@ struct ifobject {
struct xsk_socket_info *xsk_arr;
struct xsk_umem_info *umem;
thread_func_t func_ptr;
+ validation_func_t validation_func;
struct pkt_stream *pkt_stream;
int ns_fd;
int xsk_map_fd;
@@ -138,7 +145,9 @@ struct ifobject {
bool tx_on;
bool rx_on;
bool use_poll;
- bool pacing_on;
+ bool busy_poll;
+ bool use_fill_ring;
+ bool release_rx;
u8 dst_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
};
@@ -150,6 +159,7 @@ struct test_spec {
u16 total_steps;
u16 current_step;
u16 nb_sockets;
+ bool fail;
char name[MAX_TEST_NAME_SIZE];
};
@@ -157,6 +167,6 @@ pthread_barrier_t barr;
pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER;
-u32 pkts_in_flight;
+int pkts_in_flight;
-#endif /* XDPXCEIVER_H */
+#endif /* XSKXCEIVER_H_ */
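The SO_* fallback definitions above keep the build working against older
userspace headers. A sketch of how a busy-poll test mode might arm a socket fd
with them (the helper name and the specific values are illustrative; the
fallback values match asm-generic/socket.h):

    #include <sys/socket.h>

    #ifndef SO_BUSY_POLL
    #define SO_BUSY_POLL 46
    #endif
    #ifndef SO_PREFER_BUSY_POLL
    #define SO_PREFER_BUSY_POLL 69
    #endif
    #ifndef SO_BUSY_POLL_BUDGET
    #define SO_BUSY_POLL_BUDGET 70
    #endif

    /* Illustrative helper: enable preferred busy polling on a socket. */
    static int enable_busy_poll(int fd)
    {
            int opt = 1;

            if (setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
                           &opt, sizeof(opt)) < 0)
                    return -1;

            opt = 20; /* busy-poll for up to 20us per syscall */
            if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
                           &opt, sizeof(opt)) < 0)
                    return -1;

            opt = 64; /* up to 64 packets per busy-poll cycle */
            return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
                              &opt, sizeof(opt));
    }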
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index be9643ef6285..306ee1b01e72 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -4,3 +4,4 @@ test_core
test_freezer
test_kmem
test_kill
+test_cpu
diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 745fe25fa0b9..478217cc1371 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -10,6 +10,7 @@ TEST_GEN_PROGS += test_kmem
TEST_GEN_PROGS += test_core
TEST_GEN_PROGS += test_freezer
TEST_GEN_PROGS += test_kill
+TEST_GEN_PROGS += test_cpu
LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h
@@ -20,3 +21,4 @@ $(OUTPUT)/test_kmem: cgroup_util.c
$(OUTPUT)/test_core: cgroup_util.c
$(OUTPUT)/test_freezer: cgroup_util.c
$(OUTPUT)/test_kill: cgroup_util.c
+$(OUTPUT)/test_cpu: cgroup_util.c
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index dbaa7aabbb4a..4c52cc6f2f9c 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -19,6 +19,7 @@
#include "cgroup_util.h"
#include "../clone3/clone3_selftests.h"
+/* Returns read len on success, or -errno on failure. */
static ssize_t read_text(const char *path, char *buf, size_t max_len)
{
ssize_t len;
@@ -26,35 +27,29 @@ static ssize_t read_text(const char *path, char *buf, size_t max_len)
fd = open(path, O_RDONLY);
if (fd < 0)
- return fd;
+ return -errno;
len = read(fd, buf, max_len - 1);
- if (len < 0)
- goto out;
- buf[len] = 0;
-out:
+ if (len >= 0)
+ buf[len] = 0;
+
close(fd);
- return len;
+ return len < 0 ? -errno : len;
}
+/* Returns written len on success, or -errno on failure. */
static ssize_t write_text(const char *path, char *buf, ssize_t len)
{
int fd;
fd = open(path, O_WRONLY | O_APPEND);
if (fd < 0)
- return fd;
+ return -errno;
len = write(fd, buf, len);
- if (len < 0) {
- close(fd);
- return len;
- }
-
close(fd);
-
- return len;
+ return len < 0 ? -errno : len;
}
char *cg_name(const char *root, const char *name)
@@ -87,16 +82,16 @@ char *cg_control(const char *cgroup, const char *control)
return ret;
}
+/* Returns 0 on success, or -errno on failure. */
int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
{
char path[PATH_MAX];
+ ssize_t ret;
snprintf(path, sizeof(path), "%s/%s", cgroup, control);
- if (read_text(path, buf, len) >= 0)
- return 0;
-
- return -1;
+ ret = read_text(path, buf, len);
+ return ret >= 0 ? 0 : ret;
}
int cg_read_strcmp(const char *cgroup, const char *control,
@@ -177,17 +172,27 @@ long cg_read_lc(const char *cgroup, const char *control)
return cnt;
}
+/* Returns 0 on success, or -errno on failure. */
int cg_write(const char *cgroup, const char *control, char *buf)
{
char path[PATH_MAX];
- ssize_t len = strlen(buf);
+ ssize_t len = strlen(buf), ret;
snprintf(path, sizeof(path), "%s/%s", cgroup, control);
+ ret = write_text(path, buf, len);
+ return ret == len ? 0 : ret;
+}
- if (write_text(path, buf, len) == len)
- return 0;
+int cg_write_numeric(const char *cgroup, const char *control, long value)
+{
+ char buf[64];
+ int ret;
- return -1;
+	ret = sprintf(buf, "%ld", value);
+ if (ret < 0)
+ return ret;
+
+ return cg_write(cgroup, control, buf);
}
int cg_find_unified_root(char *root, size_t len)
@@ -535,6 +540,18 @@ int set_oom_adj_score(int pid, int score)
return 0;
}
+int proc_mount_contains(const char *option)
+{
+ char buf[4 * PAGE_SIZE];
+ ssize_t read;
+
+ read = read_text("/proc/mounts", buf, sizeof(buf));
+ if (read < 0)
+ return read;
+
+ return strstr(buf, option) != NULL;
+}
+
ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size)
{
char path[PATH_MAX];
@@ -545,7 +562,8 @@ ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t
else
snprintf(path, sizeof(path), "/proc/%d/%s", pid, item);
- return read_text(path, buf, size);
+	/* "size" is a size_t, so a negative return would be lost in it. */
+	ssize_t ret = read_text(path, buf, size);
+
+	return ret < 0 ? -1 : ret;
}
int proc_read_strstr(int pid, bool thread, const char *item, const char *needle)
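With read_text() and write_text() now propagating -errno, callers can act on
the specific failure rather than a bare -1. A hypothetical call site (not part
of the patch):

    #include <errno.h>

    #include "../kselftest.h"
    #include "cgroup_util.h"

    /* Hypothetical: skip rather than fail when the control file is
     * absent on this kernel. */
    static int check_reclaim_supported(const char *cgroup)
    {
            char buf[32];
            int ret = cg_read(cgroup, "memory.reclaim", buf, sizeof(buf));

            if (ret == -ENOENT)
                    return KSFT_SKIP;
            return ret < 0 ? KSFT_FAIL : KSFT_PASS;
    }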
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index 628738532ac9..c92df4e5d395 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -8,6 +8,9 @@
#define MB(x) (x << 20)
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+
/*
* Checks if two given values differ by less than err% of their sum.
*/
@@ -32,6 +35,7 @@ extern long cg_read_long(const char *cgroup, const char *control);
long cg_read_key_long(const char *cgroup, const char *control, const char *key);
extern long cg_read_lc(const char *cgroup, const char *control);
extern int cg_write(const char *cgroup, const char *control, char *buf);
+int cg_write_numeric(const char *cgroup, const char *control, long value);
extern int cg_run(const char *cgroup,
int (*fn)(const char *cgroup, void *arg),
void *arg);
@@ -48,6 +52,7 @@ extern int is_swap_enabled(void);
extern int set_oom_adj_score(int pid, int score);
extern int cg_wait_for_proc_count(const char *cgroup, int count);
extern int cg_killall(const char *cgroup);
+int proc_mount_contains(const char *option);
extern ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t size);
extern int proc_read_strstr(int pid, bool thread, const char *item, const char *needle);
extern pid_t clone_into_cgroup(int cgroup_fd);
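A hypothetical call site for the new numeric writer, mirroring how the cpu
controller test assigns indexed weights without manual string formatting:

    #include "cgroup_util.h"

    /* Hypothetical helper: weight 50 for child 0, 100 for child 1, ... */
    static int set_indexed_weight(const char *cgroup, int idx)
    {
            return cg_write_numeric(cgroup, "cpu.weight", 50 * (idx + 1));
    }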
diff --git a/tools/testing/selftests/cgroup/config b/tools/testing/selftests/cgroup/config
new file mode 100644
index 000000000000..84fe884fad86
--- /dev/null
+++ b/tools/testing/selftests/cgroup/config
@@ -0,0 +1,8 @@
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_PAGE_COUNTER=y
diff --git a/tools/testing/selftests/cgroup/memcg_protection.m b/tools/testing/selftests/cgroup/memcg_protection.m
new file mode 100644
index 000000000000..051daa3477b6
--- /dev/null
+++ b/tools/testing/selftests/cgroup/memcg_protection.m
@@ -0,0 +1,89 @@
+% SPDX-License-Identifier: GPL-2.0
+%
+% run as: octave-cli memcg_protection.m
+%
+% This script simulates reclaim protection behavior on a single level of memcg
+% hierarchy to illustrate how overcommitted protection spreads among siblings
+% (as it depends also on their current consumption).
+%
+% The simulation assumes the siblings consumed the initial amount of memory
+% (without reclaim) and then reclaim starts; all memory is reclaimable, i.e.
+% treated the same. It simulates only non-low reclaim and assumes all
+% memory.min = 0.
+%
+% Input configurations
+% --------------------
+% E number parent effective protection
+% n vector nominal protection of siblings set at the given level (memory.low)
+% c vector current consumption -,,- (memory.current)
+
+% example from testcase (values in GB)
+E = 50 / 1024;
+n = [75 25 0 500 ] / 1024;
+c = [50 50 50 0] / 1024;
+
+% Reclaim parameters
+% ------------------
+
+% Minimal reclaim amount (GB)
+cluster = 32*4 / 2**20;
+
+% Reclaim coefficient (think as 0.5^sc->priority)
+alpha = .1
+
+% Simulation parameters
+% ---------------------
+epsilon = 1e-7;
+timeout = 1000;
+
+% Simulation loop
+% ---------------
+
+ch = [];
+eh = [];
+rh = [];
+
+for t = 1:timeout
+ % low_usage
+ u = min(c, n);
+ siblings = sum(u);
+
+ % effective_protection()
+ protected = min(n, c); % start with nominal
+ e = protected * min(1, E / siblings); % normalize overcommit
+
+ % recursive protection
+ unclaimed = max(0, E - siblings);
+ parent_overuse = sum(c) - siblings;
+ if (unclaimed > 0 && parent_overuse > 0)
+ overuse = max(0, c - protected);
+ e += unclaimed * (overuse / parent_overuse);
+ endif
+
+ % get_scan_count()
+ r = alpha * c; % assume all memory is in a single LRU list
+
+ % commit 1bc63fb1272b ("mm, memcg: make scan aggression always exclude protection")
+ sz = max(e, c);
+ r .*= (1 - (e+epsilon) ./ (sz+epsilon));
+
+ % uncomment for debug prints
+ % e, c, r
+
+ % nothing to reclaim, reached equilibrium
+ if max(r) < epsilon
+ break;
+ endif
+
+ % SWAP_CLUSTER_MAX roundup
+ r = max(r, (r > epsilon) .* cluster);
+ % XXX here I do parallel reclaim of all siblings
+ % in reality reclaim is serialized and each sibling recalculates own residual
+ c = max(c - r, 0);
+
+ ch = [ch ; c];
+ eh = [eh ; e];
+ rh = [rh ; r];
+endfor
+
+t
+c, e
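Restated in closed form, the effective protection the loop computes for
sibling i, with u_j = min(n_j, c_j), is

\[
e_i = u_i \min\!\left(1, \frac{E}{\sum_j u_j}\right)
    + \max\!\left(0,\; E - \sum_j u_j\right)
      \frac{\max(0,\; c_i - u_i)}{\sum_j c_j - \sum_j u_j}
\]

where the second term contributes only when the parent has unclaimed
protection and the siblings overuse theirs, matching the
"if (unclaimed > 0 && parent_overuse > 0)" guard above.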
diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
new file mode 100644
index 000000000000..24020a2c68dc
--- /dev/null
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <linux/limits.h>
+#include <sys/sysinfo.h>
+#include <sys/wait.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <time.h>
+
+#include "../kselftest.h"
+#include "cgroup_util.h"
+
+enum hog_clock_type {
+ // Count elapsed time using the CLOCK_PROCESS_CPUTIME_ID clock.
+ CPU_HOG_CLOCK_PROCESS,
+ // Count elapsed time using system wallclock time.
+ CPU_HOG_CLOCK_WALL,
+};
+
+struct cpu_hogger {
+ char *cgroup;
+ pid_t pid;
+ long usage;
+};
+
+struct cpu_hog_func_param {
+ int nprocs;
+ struct timespec ts;
+ enum hog_clock_type clock_type;
+};
+
+/*
+ * This test creates two nested cgroups with and without enabling
+ * the cpu controller.
+ */
+static int test_cpucg_subtree_control(const char *root)
+{
+ char *parent = NULL, *child = NULL, *parent2 = NULL, *child2 = NULL;
+ int ret = KSFT_FAIL;
+
+ // Create two nested cgroups with the cpu controller enabled.
+ parent = cg_name(root, "cpucg_test_0");
+ if (!parent)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+
+ child = cg_name(parent, "cpucg_test_child");
+ if (!child)
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_read_strstr(child, "cgroup.controllers", "cpu"))
+ goto cleanup;
+
+ // Create two nested cgroups without enabling the cpu controller.
+ parent2 = cg_name(root, "cpucg_test_1");
+ if (!parent2)
+ goto cleanup;
+
+ if (cg_create(parent2))
+ goto cleanup;
+
+ child2 = cg_name(parent2, "cpucg_test_child");
+ if (!child2)
+ goto cleanup;
+
+ if (cg_create(child2))
+ goto cleanup;
+
+ if (!cg_read_strstr(child2, "cgroup.controllers", "cpu"))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_destroy(child);
+ free(child);
+ cg_destroy(child2);
+ free(child2);
+ cg_destroy(parent);
+ free(parent);
+ cg_destroy(parent2);
+ free(parent2);
+
+ return ret;
+}
+
+static void *hog_cpu_thread_func(void *arg)
+{
+ while (1)
+ ;
+
+ return NULL;
+}
+
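+/*
+ * Subtract rhs from lhs, saturating at zero: a negative difference is
+ * reported as { 0, 0 } rather than as a negative timespec, which keeps
+ * the remaining-runtime arithmetic in the callers simple.
+ */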
+static struct timespec
+timespec_sub(const struct timespec *lhs, const struct timespec *rhs)
+{
+ struct timespec zero = {
+ .tv_sec = 0,
+ .tv_nsec = 0,
+ };
+ struct timespec ret;
+
+ if (lhs->tv_sec < rhs->tv_sec)
+ return zero;
+
+ ret.tv_sec = lhs->tv_sec - rhs->tv_sec;
+
+ if (lhs->tv_nsec < rhs->tv_nsec) {
+ if (ret.tv_sec == 0)
+ return zero;
+
+ ret.tv_sec--;
+ ret.tv_nsec = NSEC_PER_SEC - rhs->tv_nsec + lhs->tv_nsec;
+ } else
+ ret.tv_nsec = lhs->tv_nsec - rhs->tv_nsec;
+
+ return ret;
+}
+
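+/*
+ * Spin in param->nprocs busy threads until param->ts has elapsed. With
+ * CPU_HOG_CLOCK_PROCESS the budget is CPU time actually consumed by the
+ * process (useful where throttling is expected); with CPU_HOG_CLOCK_WALL
+ * it is elapsed wall-clock time.
+ */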
+static int hog_cpus_timed(const char *cgroup, void *arg)
+{
+ const struct cpu_hog_func_param *param =
+ (struct cpu_hog_func_param *)arg;
+ struct timespec ts_run = param->ts;
+ struct timespec ts_remaining = ts_run;
+ struct timespec ts_start;
+ int i, ret;
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &ts_start);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < param->nprocs; i++) {
+ pthread_t tid;
+
+ ret = pthread_create(&tid, NULL, &hog_cpu_thread_func, NULL);
+ if (ret != 0)
+ return ret;
+ }
+
+ while (ts_remaining.tv_sec > 0 || ts_remaining.tv_nsec > 0) {
+ struct timespec ts_total;
+
+ ret = nanosleep(&ts_remaining, NULL);
+ if (ret && errno != EINTR)
+ return ret;
+
+ if (param->clock_type == CPU_HOG_CLOCK_PROCESS) {
+ ret = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts_total);
+ if (ret != 0)
+ return ret;
+ } else {
+ struct timespec ts_current;
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &ts_current);
+ if (ret != 0)
+ return ret;
+
+ ts_total = timespec_sub(&ts_current, &ts_start);
+ }
+
+ ts_remaining = timespec_sub(&ts_run, &ts_total);
+ }
+
+ return 0;
+}
+
+/*
+ * Creates a cpu cgroup, burns a CPU for a few quanta, and verifies that
+ * cpu.stat shows the expected output.
+ */
+static int test_cpucg_stats(const char *root)
+{
+ int ret = KSFT_FAIL;
+ long usage_usec, user_usec, system_usec;
+ long usage_seconds = 2;
+ long expected_usage_usec = usage_seconds * USEC_PER_SEC;
+ char *cpucg;
+
+ cpucg = cg_name(root, "cpucg_test");
+ if (!cpucg)
+ goto cleanup;
+
+ if (cg_create(cpucg))
+ goto cleanup;
+
+ usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
+ user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
+ system_usec = cg_read_key_long(cpucg, "cpu.stat", "system_usec");
+ if (usage_usec != 0 || user_usec != 0 || system_usec != 0)
+ goto cleanup;
+
+ struct cpu_hog_func_param param = {
+ .nprocs = 1,
+ .ts = {
+ .tv_sec = usage_seconds,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_PROCESS,
+ };
+ if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
+ goto cleanup;
+
+ usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
+ user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
+ if (user_usec <= 0)
+ goto cleanup;
+
+ if (!values_close(usage_usec, expected_usage_usec, 1))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_destroy(cpucg);
+ free(cpucg);
+
+ return ret;
+}
+
+static int
+run_cpucg_weight_test(
+ const char *root,
+ pid_t (*spawn_child)(const struct cpu_hogger *child),
+ int (*validate)(const struct cpu_hogger *children, int num_children))
+{
+ int ret = KSFT_FAIL, i;
+ char *parent = NULL;
+ struct cpu_hogger children[3] = {NULL};
+
+ parent = cg_name(root, "cpucg_test_0");
+ if (!parent)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(children); i++) {
+ children[i].cgroup = cg_name_indexed(parent, "cpucg_child", i);
+ if (!children[i].cgroup)
+ goto cleanup;
+
+ if (cg_create(children[i].cgroup))
+ goto cleanup;
+
+ if (cg_write_numeric(children[i].cgroup, "cpu.weight",
+ 50 * (i + 1)))
+ goto cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(children); i++) {
+ pid_t pid = spawn_child(&children[i]);
+ if (pid <= 0)
+ goto cleanup;
+ children[i].pid = pid;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(children); i++) {
+ int retcode;
+
+ waitpid(children[i].pid, &retcode, 0);
+ if (!WIFEXITED(retcode))
+ goto cleanup;
+ if (WEXITSTATUS(retcode))
+ goto cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(children); i++)
+ children[i].usage = cg_read_key_long(children[i].cgroup,
+ "cpu.stat", "usage_usec");
+
+ if (validate(children, ARRAY_SIZE(children)))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+cleanup:
+ for (i = 0; i < ARRAY_SIZE(children); i++) {
+ cg_destroy(children[i].cgroup);
+ free(children[i].cgroup);
+ }
+ cg_destroy(parent);
+ free(parent);
+
+ return ret;
+}
+
+static pid_t weight_hog_ncpus(const struct cpu_hogger *child, int ncpus)
+{
+ long usage_seconds = 10;
+ struct cpu_hog_func_param param = {
+ .nprocs = ncpus,
+ .ts = {
+ .tv_sec = usage_seconds,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_WALL,
+ };
+ return cg_run_nowait(child->cgroup, hog_cpus_timed, (void *)&param);
+}
+
+static pid_t weight_hog_all_cpus(const struct cpu_hogger *child)
+{
+ return weight_hog_ncpus(child, get_nprocs());
+}
+
+static int
+overprovision_validate(const struct cpu_hogger *children, int num_children)
+{
+ int ret = KSFT_FAIL, i;
+
+ for (i = 0; i < num_children - 1; i++) {
+ long delta;
+
+ if (children[i + 1].usage <= children[i].usage)
+ goto cleanup;
+
+ delta = children[i + 1].usage - children[i].usage;
+ if (!values_close(delta, children[0].usage, 35))
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+cleanup:
+ return ret;
+}
+
+/*
+ * First, this test creates the following hierarchy:
+ * A
+ * A/B cpu.weight = 50
+ * A/C cpu.weight = 100
+ * A/D cpu.weight = 150
+ *
+ * A separate process is then created for each child cgroup which spawns as
+ * many threads as there are cores, and hogs each CPU as much as possible
+ * for some time interval.
+ *
+ * Once all of the children have exited, we verify that each child cgroup
+ * was given proportional runtime as informed by their cpu.weight.
+ */
+static int test_cpucg_weight_overprovisioned(const char *root)
+{
+ return run_cpucg_weight_test(root, weight_hog_all_cpus,
+ overprovision_validate);
+}
+
+static pid_t weight_hog_one_cpu(const struct cpu_hogger *child)
+{
+ return weight_hog_ncpus(child, 1);
+}
+
+static int
+underprovision_validate(const struct cpu_hogger *children, int num_children)
+{
+ int ret = KSFT_FAIL, i;
+
+ for (i = 0; i < num_children - 1; i++) {
+ if (!values_close(children[i + 1].usage, children[0].usage, 15))
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+cleanup:
+ return ret;
+}
+
+/*
+ * First, this test creates the following hierarchy:
+ * A
+ * A/B cpu.weight = 50
+ * A/C cpu.weight = 100
+ * A/D cpu.weight = 150
+ *
+ * A separate process is then created for each child cgroup which spawns a
+ * single thread that hogs a CPU. The test is only run on systems that
+ * have at least one core per hog thread in the child processes.
+ *
+ * Once all of the children have exited, we verify that each child cgroup
+ * had roughly the same runtime despite having different cpu.weight.
+ */
+static int test_cpucg_weight_underprovisioned(const char *root)
+{
+ // Only run the test if there are enough cores to avoid overprovisioning
+ // the system.
+ if (get_nprocs() < 4)
+ return KSFT_SKIP;
+
+ return run_cpucg_weight_test(root, weight_hog_one_cpu,
+ underprovision_validate);
+}
+
+static int
+run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
+{
+ int ret = KSFT_FAIL, i;
+ char *parent = NULL, *child = NULL;
+ struct cpu_hogger leaf[3] = {NULL};
+ long nested_leaf_usage, child_usage;
+ int nprocs = get_nprocs();
+
+ if (!overprovisioned) {
+ if (nprocs < 4)
+ /*
+ * Only run the test if there are enough cores to avoid overprovisioning
+ * the system.
+ */
+ return KSFT_SKIP;
+ nprocs /= 4;
+ }
+
+ parent = cg_name(root, "cpucg_test");
+ child = cg_name(parent, "cpucg_child");
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+ if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+ if (cg_write(child, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+ if (cg_write(child, "cpu.weight", "1000"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ const char *ancestor;
+ long weight;
+
+ if (i == 0) {
+ ancestor = parent;
+ weight = 1000;
+ } else {
+ ancestor = child;
+ weight = 5000;
+ }
+ leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
+ if (!leaf[i].cgroup)
+ goto cleanup;
+
+ if (cg_create(leaf[i].cgroup))
+ goto cleanup;
+
+ if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
+ goto cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ pid_t pid;
+ struct cpu_hog_func_param param = {
+ .nprocs = nprocs,
+ .ts = {
+ .tv_sec = 10,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_WALL,
+ };
+
+ pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
+ (void *)&param);
+ if (pid <= 0)
+ goto cleanup;
+ leaf[i].pid = pid;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ int retcode;
+
+ waitpid(leaf[i].pid, &retcode, 0);
+ if (!WIFEXITED(retcode))
+ goto cleanup;
+ if (WEXITSTATUS(retcode))
+ goto cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
+ "cpu.stat", "usage_usec");
+ if (leaf[i].usage <= 0)
+ goto cleanup;
+ }
+
+ nested_leaf_usage = leaf[1].usage + leaf[2].usage;
+ if (overprovisioned) {
+ if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
+ goto cleanup;
+ } else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
+ goto cleanup;
+
+ child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
+ if (child_usage <= 0)
+ goto cleanup;
+ if (!values_close(child_usage, nested_leaf_usage, 1))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+cleanup:
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ cg_destroy(leaf[i].cgroup);
+ free(leaf[i].cgroup);
+ }
+ cg_destroy(child);
+ free(child);
+ cg_destroy(parent);
+ free(parent);
+
+ return ret;
+}
+
+/*
+ * First, this test creates the following hierarchy:
+ * A
+ * A/B cpu.weight = 1000
+ * A/C cpu.weight = 1000
+ * A/C/D cpu.weight = 5000
+ * A/C/E cpu.weight = 5000
+ *
+ * A separate process is then created for each leaf, which spawns nproc
+ * threads that each burn a CPU for a few seconds.
+ *
+ * Once all of those processes have exited, we verify that each of the leaf
+ * cgroups have roughly the same usage from cpu.stat.
+ */
+static int
+test_cpucg_nested_weight_overprovisioned(const char *root)
+{
+ return run_cpucg_nested_weight_test(root, true);
+}
+
+/*
+ * First, this test creates the following hierarchy:
+ * A
+ * A/B cpu.weight = 1000
+ * A/C cpu.weight = 1000
+ * A/C/D cpu.weight = 5000
+ * A/C/E cpu.weight = 5000
+ *
+ * A separate process is then created for each leaf, which spawns
+ * nproc / 4 threads that each burn a CPU for a few seconds.
+ *
+ * Once all of those processes have exited, we verify that each of the leaf
+ * cgroups have roughly the same usage from cpu.stat.
+ */
+static int
+test_cpucg_nested_weight_underprovisioned(const char *root)
+{
+ return run_cpucg_nested_weight_test(root, false);
+}
+
+/*
+ * This test creates a cgroup with some maximum value within a period, and
+ * verifies that a process in the cgroup is not overscheduled.
+ */
+static int test_cpucg_max(const char *root)
+{
+ int ret = KSFT_FAIL;
+ long usage_usec, user_usec;
+ long usage_seconds = 1;
+ long expected_usage_usec = usage_seconds * USEC_PER_SEC;
+ char *cpucg;
+
+ cpucg = cg_name(root, "cpucg_test");
+ if (!cpucg)
+ goto cleanup;
+
+ if (cg_create(cpucg))
+ goto cleanup;
+
+ if (cg_write(cpucg, "cpu.max", "1000"))
+ goto cleanup;
+
+ struct cpu_hog_func_param param = {
+ .nprocs = 1,
+ .ts = {
+ .tv_sec = usage_seconds,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_WALL,
+ };
+ if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
+ goto cleanup;
+
+ usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
+ user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
+ if (user_usec <= 0)
+ goto cleanup;
+
+ if (user_usec >= expected_usage_usec)
+ goto cleanup;
+
+ if (values_close(usage_usec, expected_usage_usec, 95))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_destroy(cpucg);
+ free(cpucg);
+
+ return ret;
+}
+
+/*
+ * This test verifies that a process inside of a nested cgroup whose parent
+ * group has a cpu.max value set, is properly throttled.
+ */
+static int test_cpucg_max_nested(const char *root)
+{
+ int ret = KSFT_FAIL;
+ long usage_usec, user_usec;
+ long usage_seconds = 1;
+ long expected_usage_usec = usage_seconds * USEC_PER_SEC;
+ char *parent, *child;
+
+ parent = cg_name(root, "cpucg_parent");
+ child = cg_name(parent, "cpucg_child");
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_write(parent, "cpu.max", "1000"))
+ goto cleanup;
+
+ struct cpu_hog_func_param param = {
+ .nprocs = 1,
+ .ts = {
+ .tv_sec = usage_seconds,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_WALL,
+ };
+ if (cg_run(child, hog_cpus_timed, (void *)&param))
+ goto cleanup;
+
+ usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
+ user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
+ if (user_usec <= 0)
+ goto cleanup;
+
+ if (user_usec >= expected_usage_usec)
+ goto cleanup;
+
+ if (values_close(usage_usec, expected_usage_usec, 95))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_destroy(child);
+ free(child);
+ cg_destroy(parent);
+ free(parent);
+
+ return ret;
+}
+
+#define T(x) { x, #x }
+struct cpucg_test {
+ int (*fn)(const char *root);
+ const char *name;
+} tests[] = {
+ T(test_cpucg_subtree_control),
+ T(test_cpucg_stats),
+ T(test_cpucg_weight_overprovisioned),
+ T(test_cpucg_weight_underprovisioned),
+ T(test_cpucg_nested_weight_overprovisioned),
+ T(test_cpucg_nested_weight_underprovisioned),
+ T(test_cpucg_max),
+ T(test_cpucg_max_nested),
+};
+#undef T
+
+int main(int argc, char *argv[])
+{
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+ if (cg_find_unified_root(root, sizeof(root)))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
+ if (cg_write(root, "cgroup.subtree_control", "+cpu"))
+ ksft_exit_skip("Failed to set cpu controller\n");
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ switch (tests[i].fn(root)) {
+ case KSFT_PASS:
+ ksft_test_result_pass("%s\n", tests[i].name);
+ break;
+ case KSFT_SKIP:
+ ksft_test_result_skip("%s\n", tests[i].name);
+ break;
+ default:
+ ret = EXIT_FAILURE;
+ ksft_test_result_fail("%s\n", tests[i].name);
+ break;
+ }
+ }
+
+ return ret;
+}
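All of the usage comparisons above funnel through the values_close() helper
from cgroup_util.h. As a self-contained restatement (assumed to match the
in-tree definition), the tolerance is err percent of the sum of the two
values:

    #include <stdbool.h>
    #include <stdlib.h>

    /* True when a and b differ by no more than err% of (a + b). */
    static bool values_close(long a, long b, int err)
    {
            return labs(a - b) <= (a + b) / 100 * err;
    }

So the 1% check in test_cpucg_stats(), for instance, tolerates roughly
+/-40ms around the expected 2s of usage.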
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 36ccf2322e21..8833359556f3 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -21,6 +21,9 @@
#include "../kselftest.h"
#include "cgroup_util.h"
+static bool has_localevents;
+static bool has_recursiveprot;
+
/*
* This test creates two nested cgroups with and without enabling
* the memory controller.
@@ -187,13 +190,6 @@ cleanup:
return ret;
}
-static int alloc_pagecache_50M(const char *cgroup, void *arg)
-{
- int fd = (long)arg;
-
- return alloc_pagecache(fd, MB(50));
-}
-
static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
{
int fd = (long)arg;
@@ -211,13 +207,17 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
static int alloc_anon_noexit(const char *cgroup, void *arg)
{
int ppid = getppid();
+ size_t size = (unsigned long)arg;
+ char *buf, *ptr;
- if (alloc_anon(cgroup, arg))
- return -1;
+ buf = malloc(size);
+ if (buf == NULL)
+ return -1;
+ for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
+ *ptr = 0;
while (getppid() == ppid)
sleep(1);
+ free(buf);
return 0;
}
@@ -240,33 +240,39 @@ static int cg_test_proc_killed(const char *cgroup)
/*
* First, this test creates the following hierarchy:
- * A memory.min = 50M, memory.max = 200M
- * A/B memory.min = 50M, memory.current = 50M
+ * A memory.min = 0, memory.max = 200M
+ * A/B memory.min = 50M
* A/B/C memory.min = 75M, memory.current = 50M
* A/B/D memory.min = 25M, memory.current = 50M
- * A/B/E memory.min = 500M, memory.current = 0
- * A/B/F memory.min = 0, memory.current = 50M
+ * A/B/E memory.min = 0, memory.current = 50M
+ * A/B/F memory.min = 500M, memory.current = 0
+ *
+ * (or memory.low if we test soft protection)
*
- * Usages are pagecache, but the test keeps a running
+ * Usages are pagecache and the test keeps a running
* process in every leaf cgroup.
* Then it creates A/G and creates a significant
- * memory pressure in it.
+ * memory pressure in A.
*
+ * Then it checks actual memory usages and expects that:
* A/B memory.current ~= 50M
- * A/B/C memory.current ~= 33M
- * A/B/D memory.current ~= 17M
+ * A/B/C memory.current ~= 29M
+ * A/B/D memory.current ~= 21M
* A/B/E memory.current ~= 0
+ * A/B/F memory.current = 0
+ * (for origin of the numbers, see model in memcg_protection.m.)
*
* After that it tries to allocate more than there is
- * unprotected memory in A available, and checks
- * checks that memory.min protects pagecache even
- * in this case.
+ * unprotected memory in A available, and checks that:
+ * a) memory.min protects pagecache even in this case,
+ * b) memory.low allows reclaiming page cache with low events.
*/
-static int test_memcg_min(const char *root)
+static int test_memcg_protection(const char *root, bool min)
{
- int ret = KSFT_FAIL;
+ int ret = KSFT_FAIL, rc;
char *parent[3] = {NULL};
char *children[4] = {NULL};
+ const char *attribute = min ? "memory.min" : "memory.low";
long c[4];
int i, attempts;
int fd;
@@ -290,8 +296,10 @@ static int test_memcg_min(const char *root)
if (cg_create(parent[0]))
goto cleanup;
- if (cg_read_long(parent[0], "memory.min")) {
- ret = KSFT_SKIP;
+ if (cg_read_long(parent[0], attribute)) {
+ /* No memory.min on older kernels is fine */
+ if (min)
+ ret = KSFT_SKIP;
goto cleanup;
}
@@ -321,24 +329,22 @@ static int test_memcg_min(const char *root)
if (cg_create(children[i]))
goto cleanup;
- if (i == 2)
+ if (i > 2)
continue;
cg_run_nowait(children[i], alloc_pagecache_50M_noexit,
(void *)(long)fd);
}
- if (cg_write(parent[0], "memory.min", "50M"))
+ if (cg_write(parent[1], attribute, "50M"))
goto cleanup;
- if (cg_write(parent[1], "memory.min", "50M"))
+ if (cg_write(children[0], attribute, "75M"))
goto cleanup;
- if (cg_write(children[0], "memory.min", "75M"))
+ if (cg_write(children[1], attribute, "25M"))
goto cleanup;
- if (cg_write(children[1], "memory.min", "25M"))
+ if (cg_write(children[2], attribute, "0"))
goto cleanup;
- if (cg_write(children[2], "memory.min", "500M"))
- goto cleanup;
- if (cg_write(children[3], "memory.min", "0"))
+ if (cg_write(children[3], attribute, "500M"))
goto cleanup;
attempts = 0;
@@ -358,178 +364,46 @@ static int test_memcg_min(const char *root)
for (i = 0; i < ARRAY_SIZE(children); i++)
c[i] = cg_read_long(children[i], "memory.current");
- if (!values_close(c[0], MB(33), 10))
- goto cleanup;
-
- if (!values_close(c[1], MB(17), 10))
- goto cleanup;
-
- if (!values_close(c[2], 0, 1))
- goto cleanup;
-
- if (!cg_run(parent[2], alloc_anon, (void *)MB(170)))
- goto cleanup;
-
- if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
- goto cleanup;
-
- ret = KSFT_PASS;
-
-cleanup:
- for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) {
- if (!children[i])
- continue;
-
- cg_destroy(children[i]);
- free(children[i]);
- }
-
- for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) {
- if (!parent[i])
- continue;
-
- cg_destroy(parent[i]);
- free(parent[i]);
- }
- close(fd);
- return ret;
-}
-
-/*
- * First, this test creates the following hierarchy:
- * A memory.low = 50M, memory.max = 200M
- * A/B memory.low = 50M, memory.current = 50M
- * A/B/C memory.low = 75M, memory.current = 50M
- * A/B/D memory.low = 25M, memory.current = 50M
- * A/B/E memory.low = 500M, memory.current = 0
- * A/B/F memory.low = 0, memory.current = 50M
- *
- * Usages are pagecache.
- * Then it creates A/G an creates a significant
- * memory pressure in it.
- *
- * Then it checks actual memory usages and expects that:
- * A/B memory.current ~= 50M
- * A/B/ memory.current ~= 33M
- * A/B/D memory.current ~= 17M
- * A/B/E memory.current ~= 0
- *
- * After that it tries to allocate more than there is
- * unprotected memory in A available,
- * and checks low and oom events in memory.events.
- */
-static int test_memcg_low(const char *root)
-{
- int ret = KSFT_FAIL;
- char *parent[3] = {NULL};
- char *children[4] = {NULL};
- long low, oom;
- long c[4];
- int i;
- int fd;
-
- fd = get_temp_fd();
- if (fd < 0)
- goto cleanup;
-
- parent[0] = cg_name(root, "memcg_test_0");
- if (!parent[0])
- goto cleanup;
-
- parent[1] = cg_name(parent[0], "memcg_test_1");
- if (!parent[1])
- goto cleanup;
-
- parent[2] = cg_name(parent[0], "memcg_test_2");
- if (!parent[2])
- goto cleanup;
-
- if (cg_create(parent[0]))
- goto cleanup;
-
- if (cg_read_long(parent[0], "memory.low"))
- goto cleanup;
-
- if (cg_write(parent[0], "cgroup.subtree_control", "+memory"))
- goto cleanup;
-
- if (cg_write(parent[0], "memory.max", "200M"))
+ if (!values_close(c[0], MB(29), 10))
goto cleanup;
- if (cg_write(parent[0], "memory.swap.max", "0"))
+ if (!values_close(c[1], MB(21), 10))
goto cleanup;
- if (cg_create(parent[1]))
+ if (c[3] != 0)
goto cleanup;
- if (cg_write(parent[1], "cgroup.subtree_control", "+memory"))
+ rc = cg_run(parent[2], alloc_anon, (void *)MB(170));
+ if (min && !rc)
goto cleanup;
-
- if (cg_create(parent[2]))
+ else if (!min && rc) {
+ fprintf(stderr,
+ "memory.low prevents from allocating anon memory\n");
goto cleanup;
-
- for (i = 0; i < ARRAY_SIZE(children); i++) {
- children[i] = cg_name_indexed(parent[1], "child_memcg", i);
- if (!children[i])
- goto cleanup;
-
- if (cg_create(children[i]))
- goto cleanup;
-
- if (i == 2)
- continue;
-
- if (cg_run(children[i], alloc_pagecache_50M, (void *)(long)fd))
- goto cleanup;
}
- if (cg_write(parent[0], "memory.low", "50M"))
- goto cleanup;
- if (cg_write(parent[1], "memory.low", "50M"))
- goto cleanup;
- if (cg_write(children[0], "memory.low", "75M"))
- goto cleanup;
- if (cg_write(children[1], "memory.low", "25M"))
- goto cleanup;
- if (cg_write(children[2], "memory.low", "500M"))
- goto cleanup;
- if (cg_write(children[3], "memory.low", "0"))
- goto cleanup;
-
- if (cg_run(parent[2], alloc_anon, (void *)MB(148)))
- goto cleanup;
-
if (!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
goto cleanup;
- for (i = 0; i < ARRAY_SIZE(children); i++)
- c[i] = cg_read_long(children[i], "memory.current");
-
- if (!values_close(c[0], MB(33), 10))
- goto cleanup;
-
- if (!values_close(c[1], MB(17), 10))
- goto cleanup;
-
- if (!values_close(c[2], 0, 1))
- goto cleanup;
-
- if (cg_run(parent[2], alloc_anon, (void *)MB(166))) {
- fprintf(stderr,
- "memory.low prevents from allocating anon memory\n");
+ if (min) {
+ ret = KSFT_PASS;
goto cleanup;
}
for (i = 0; i < ARRAY_SIZE(children); i++) {
+ int no_low_events_index = 1;
+ long low, oom;
+
oom = cg_read_key_long(children[i], "memory.events", "oom ");
low = cg_read_key_long(children[i], "memory.events", "low ");
if (oom)
goto cleanup;
- if (i < 2 && low <= 0)
+ if (i <= no_low_events_index && low <= 0)
goto cleanup;
- if (i >= 2 && low)
+ if (i > no_low_events_index && low)
goto cleanup;
}
ret = KSFT_PASS;
@@ -554,13 +428,28 @@ cleanup:
return ret;
}
+static int test_memcg_min(const char *root)
+{
+ return test_memcg_protection(root, true);
+}
+
+static int test_memcg_low(const char *root)
+{
+ return test_memcg_protection(root, false);
+}
+
static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
{
size_t size = MB(50);
int ret = -1;
- long current;
+ long current, high, max;
int fd;
+ high = cg_read_long(cgroup, "memory.high");
+ max = cg_read_long(cgroup, "memory.max");
+ if (high != MB(30) && max != MB(30))
+ return -1;
+
fd = get_temp_fd();
if (fd < 0)
return -1;
@@ -569,7 +458,7 @@ static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
goto cleanup;
current = cg_read_long(cgroup, "memory.current");
- if (current <= MB(29) || current > MB(30))
+ if (!values_close(current, MB(30), 5))
goto cleanup;
ret = 0;
@@ -607,7 +496,7 @@ static int test_memcg_high(const char *root)
if (cg_write(memcg, "memory.high", "30M"))
goto cleanup;
- if (cg_run(memcg, alloc_anon, (void *)MB(100)))
+ if (cg_run(memcg, alloc_anon, (void *)MB(31)))
goto cleanup;
if (!cg_run(memcg, alloc_pagecache_50M_check, NULL))
@@ -756,6 +645,111 @@ cleanup:
return ret;
}
+/*
+ * This test checks that memory.reclaim reclaims the given
+ * amount of memory (from both anon and file, if possible).
+ */
+static int test_memcg_reclaim(const char *root)
+{
+ int ret = KSFT_FAIL, fd, retries;
+ char *memcg;
+ long current, expected_usage, to_reclaim;
+ char buf[64];
+
+ memcg = cg_name(root, "memcg_test");
+ if (!memcg)
+ goto cleanup;
+
+ if (cg_create(memcg))
+ goto cleanup;
+
+ current = cg_read_long(memcg, "memory.current");
+ if (current != 0)
+ goto cleanup;
+
+ fd = get_temp_fd();
+ if (fd < 0)
+ goto cleanup;
+
+ cg_run_nowait(memcg, alloc_pagecache_50M_noexit, (void *)(long)fd);
+
+ /*
+ * If swap is enabled, try to reclaim from both anon and file, else try
+ * to reclaim from file only.
+ */
+ if (is_swap_enabled()) {
+ cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(50));
+ expected_usage = MB(100);
+ } else
+ expected_usage = MB(50);
+
+ /*
+ * Wait until current usage reaches the expected usage (or we run out of
+ * retries).
+ */
+ retries = 5;
+ while (!values_close(cg_read_long(memcg, "memory.current"),
+ expected_usage, 10)) {
+ if (retries--) {
+ sleep(1);
+ continue;
+ } else {
+ fprintf(stderr,
+ "failed to allocate %ld for memcg reclaim test\n",
+ expected_usage);
+ goto cleanup;
+ }
+ }
+
+ /*
+ * Reclaim until current reaches 30M, this makes sure we hit both anon
+ * and file if swap is enabled.
+ */
+ retries = 5;
+ while (true) {
+ int err;
+
+ current = cg_read_long(memcg, "memory.current");
+ to_reclaim = current - MB(30);
+
+ /*
+ * We only keep looping if we get EAGAIN, which means we could
+ * not reclaim the full amount.
+ */
+ if (to_reclaim <= 0)
+ goto cleanup;
+
+ snprintf(buf, sizeof(buf), "%ld", to_reclaim);
+ err = cg_write(memcg, "memory.reclaim", buf);
+ if (!err) {
+ /*
+ * If writing succeeds, then the written amount should have been
+ * fully reclaimed (and maybe more).
+ */
+ current = cg_read_long(memcg, "memory.current");
+ if (!values_close(current, MB(30), 3) && current > MB(30))
+ goto cleanup;
+ break;
+ }
+
+ /* The kernel could not reclaim the full amount, try again. */
+ if (err == -EAGAIN && retries--)
+ continue;
+
+ /* We got an unexpected error or ran out of retries. */
+ goto cleanup;
+ }
+
+ ret = KSFT_PASS;
+cleanup:
+ cg_destroy(memcg);
+ free(memcg);
+ close(fd);
+
+ return ret;
+}
+
static int alloc_anon_50M_check_swap(const char *cgroup, void *arg)
{
long mem_max = (long)arg;
@@ -987,9 +981,6 @@ static int tcp_client(const char *cgroup, unsigned short port)
if (current < 0 || sock < 0)
goto close_sk;
- if (current < sock)
- goto close_sk;
-
if (values_close(current, sock, 10)) {
ret = KSFT_PASS;
break;
@@ -1079,12 +1070,14 @@ cleanup:
/*
* This test disables swapping and tries to allocate anonymous memory
* up to OOM with memory.group.oom set. Then it checks that all
- * processes in the leaf (but not the parent) were killed.
+ * processes in the leaf were killed. It also checks that oom_events
+ * were propagated to the parent level.
*/
static int test_memcg_oom_group_leaf_events(const char *root)
{
int ret = KSFT_FAIL;
char *parent, *child;
+ long parent_oom_events;
parent = cg_name(root, "memcg_test_0");
child = cg_name(root, "memcg_test_0/memcg_test_1");
@@ -1122,7 +1115,16 @@ static int test_memcg_oom_group_leaf_events(const char *root)
if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
goto cleanup;
- if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0)
+ parent_oom_events = cg_read_key_long(
+ parent, "memory.events", "oom_kill ");
+ /*
+ * If memory_localevents is not enabled (the default), the parent should
+ * count OOM events in its children groups. Otherwise, it should not
+ * have observed any events.
+ */
+ if (has_localevents && parent_oom_events != 0)
+ goto cleanup;
+ else if (!has_localevents && parent_oom_events <= 0)
goto cleanup;
ret = KSFT_PASS;
@@ -1246,7 +1248,6 @@ cleanup:
return ret;
}
-
#define T(x) { x, #x }
struct memcg_test {
int (*fn)(const char *root);
@@ -1259,6 +1260,7 @@ struct memcg_test {
T(test_memcg_high),
T(test_memcg_high_sync),
T(test_memcg_max),
+ T(test_memcg_reclaim),
T(test_memcg_oom_events),
T(test_memcg_swap_max),
T(test_memcg_sock),
@@ -1271,7 +1273,7 @@ struct memcg_test {
int main(int argc, char **argv)
{
char root[PATH_MAX];
- int i, ret = EXIT_SUCCESS;
+ int i, proc_status, ret = EXIT_SUCCESS;
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
@@ -1287,6 +1289,16 @@ int main(int argc, char **argv)
if (cg_write(root, "cgroup.subtree_control", "+memory"))
ksft_exit_skip("Failed to set memory controller\n");
+ proc_status = proc_mount_contains("memory_recursiveprot");
+ if (proc_status < 0)
+ ksft_exit_skip("Failed to query cgroup mount option\n");
+ has_recursiveprot = proc_status;
+
+ proc_status = proc_mount_contains("memory_localevents");
+ if (proc_status < 0)
+ ksft_exit_skip("Failed to query cgroup mount option\n");
+ has_localevents = proc_status;
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
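For reference, the EAGAIN retry at the heart of test_memcg_reclaim(), lifted
into a self-contained sketch (the path handling and retry count are
illustrative):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Ask the kernel to reclaim "bytes" from a cgroup; a write to
     * memory.reclaim fails with EAGAIN when the full amount could not
     * be reclaimed, so retry a bounded number of times. */
    static int reclaim_bytes(const char *memcg, long bytes)
    {
            char path[4096], buf[32];
            int retries = 5;

            snprintf(path, sizeof(path), "%s/memory.reclaim", memcg);
            snprintf(buf, sizeof(buf), "%ld", bytes);

            while (retries--) {
                    int fd = open(path, O_WRONLY);
                    ssize_t ret;

                    if (fd < 0)
                            return -errno;
                    ret = write(fd, buf, strlen(buf));
                    close(fd);
                    if (ret >= 0)
                            return 0;
                    if (errno != EAGAIN)
                            return -errno;
            }
            return -EAGAIN;
    }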
diff --git a/tools/testing/selftests/cgroup/test_stress.sh b/tools/testing/selftests/cgroup/test_stress.sh
index 15d9d5896394..3c9c4554d5f6 100755
--- a/tools/testing/selftests/cgroup/test_stress.sh
+++ b/tools/testing/selftests/cgroup/test_stress.sh
@@ -1,4 +1,4 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-./with_stress.sh -s subsys -s fork ./test_core
+./with_stress.sh -s subsys -s fork ${OUTPUT:-.}/test_core
diff --git a/tools/testing/selftests/damon/_chk_dependency.sh b/tools/testing/selftests/damon/_chk_dependency.sh
index 0189db81550b..0328ac0b5a5e 100644
--- a/tools/testing/selftests/damon/_chk_dependency.sh
+++ b/tools/testing/selftests/damon/_chk_dependency.sh
@@ -26,3 +26,13 @@ do
exit 1
fi
done
+
+permission_error="Operation not permitted"
+for f in attrs target_ids monitor_on
+do
+ status=$( cat "$DBGFS/$f" 2>&1 )
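+ # ${status#*$permission_error} strips everything up to and including
+ # the error text, so it differs from $status only when that text
+ # appeared in the output.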
+ if [ "${status#*$permission_error}" != "$status" ]; then
+ echo "Permission for reading $DBGFS/$f denied; maybe secureboot enabled?"
+ exit $ksft_skip
+ fi
+done
diff --git a/tools/testing/selftests/damon/sysfs.sh b/tools/testing/selftests/damon/sysfs.sh
index 2e3ae77cb6db..89592c64462f 100644
--- a/tools/testing/selftests/damon/sysfs.sh
+++ b/tools/testing/selftests/damon/sysfs.sh
@@ -231,6 +231,7 @@ test_context()
{
context_dir=$1
ensure_dir "$context_dir" "exist"
+ ensure_file "$context_dir/avail_operations" "exit" 400
ensure_file "$context_dir/operations" "exist" 600
test_monitoring_attrs "$context_dir/monitoring_attrs"
test_targets "$context_dir/targets"
diff --git a/tools/testing/selftests/dma/Makefile b/tools/testing/selftests/dma/Makefile
index aa8e8b5b3864..cd8c5ece1cba 100644
--- a/tools/testing/selftests/dma/Makefile
+++ b/tools/testing/selftests/dma/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -I../../../../usr/include/
+CFLAGS += -I../../../../include/
TEST_GEN_PROGS := dma_map_benchmark
diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c
index c3b3c09e995e..5c997f17fcbd 100644
--- a/tools/testing/selftests/dma/dma_map_benchmark.c
+++ b/tools/testing/selftests/dma/dma_map_benchmark.c
@@ -10,8 +10,8 @@
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
-#include <linux/map_benchmark.h>
#include <linux/types.h>
+#include <linux/map_benchmark.h>
#define NSEC_PER_MSEC 1000000L
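The reordering presumably matters because linux/map_benchmark.h uses the
kernel integer types that linux/types.h provides, so the types header has to
be included first:

    #include <linux/types.h>          /* __u32, __u64, ...              */
    #include <linux/map_benchmark.h>  /* struct map_benchmark uses them */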
diff --git a/tools/testing/selftests/drivers/.gitignore b/tools/testing/selftests/drivers/.gitignore
index ca74f2e1c719..09e23b5afa96 100644
--- a/tools/testing/selftests/drivers/.gitignore
+++ b/tools/testing/selftests/drivers/.gitignore
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
/dma-buf/udmabuf
+/s390x/uvdevice/test_uvdevice
diff --git a/tools/testing/selftests/drivers/dma-buf/udmabuf.c b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
index de1c4e6de0b2..c812080e304e 100644
--- a/tools/testing/selftests/drivers/dma-buf/udmabuf.c
+++ b/tools/testing/selftests/drivers/dma-buf/udmabuf.c
@@ -32,7 +32,8 @@ int main(int argc, char *argv[])
devfd = open("/dev/udmabuf", O_RDWR);
if (devfd < 0) {
- printf("%s: [skip,no-udmabuf]\n", TEST_PREFIX);
+ printf("%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
+ TEST_PREFIX);
exit(77);
}
diff --git a/tools/testing/selftests/drivers/gpu/drm_mm.sh b/tools/testing/selftests/drivers/gpu/drm_mm.sh
index b789dc8257e6..09c76cd7661d 100755
--- a/tools/testing/selftests/drivers/gpu/drm_mm.sh
+++ b/tools/testing/selftests/drivers/gpu/drm_mm.sh
@@ -3,7 +3,7 @@
# Runs API tests for struct drm_mm (DRM range manager)
if ! /sbin/modprobe -n -q test-drm_mm; then
- echo "drivers/gpu/drm_mm: [skip]"
+ echo "drivers/gpu/drm_mm: module test-drm_mm is not found in /lib/modules/`uname -r` [skip]"
exit 77
fi
@@ -11,6 +11,6 @@ if /sbin/modprobe -q test-drm_mm; then
/sbin/modprobe -q -r test-drm_mm
echo "drivers/gpu/drm_mm: ok"
else
- echo "drivers/gpu/drm_mm: [FAIL]"
+ echo "drivers/gpu/drm_mm: module test-drm_mm could not be removed [FAIL]"
exit 1
fi
diff --git a/tools/testing/selftests/drivers/net/dsa/Makefile b/tools/testing/selftests/drivers/net/dsa/Makefile
new file mode 100644
index 000000000000..2a731d5c6d85
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+TEST_PROGS = bridge_locked_port.sh \
+ bridge_mdb.sh \
+ bridge_mld.sh \
+ bridge_vlan_aware.sh \
+ bridge_vlan_mcast.sh \
+ bridge_vlan_unaware.sh \
+ local_termination.sh \
+ no_forwarding.sh \
+ test_bridge_fdb_stress.sh
+
+TEST_PROGS_EXTENDED := lib.sh
+
+TEST_FILES := forwarding.config
+
+include ../../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
new file mode 120000
index 000000000000..f5eb940c4c7c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_locked_port.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
new file mode 120000
index 000000000000..76492da525f7
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_mdb.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
new file mode 120000
index 000000000000..81a7e0df0474
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_mld.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_mld.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
new file mode 120000
index 000000000000..9831ed74376a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_vlan_aware.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
new file mode 120000
index 000000000000..7f3c3f0bf719
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_vlan_mcast.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
new file mode 120000
index 000000000000..bf1a57e6bde1
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh
@@ -0,0 +1 @@
+../../../net/forwarding/bridge_vlan_unaware.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/forwarding.config b/tools/testing/selftests/drivers/net/dsa/forwarding.config
new file mode 100644
index 000000000000..7adc1396fae0
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/forwarding.config
@@ -0,0 +1,2 @@
+NETIF_CREATE=no
+STABLE_MAC_ADDRS=yes
diff --git a/tools/testing/selftests/drivers/net/dsa/lib.sh b/tools/testing/selftests/drivers/net/dsa/lib.sh
new file mode 120000
index 000000000000..39c96828c5ef
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/lib.sh
@@ -0,0 +1 @@
+../../../net/forwarding/lib.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/local_termination.sh b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
new file mode 120000
index 000000000000..c08166f84501
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/local_termination.sh
@@ -0,0 +1 @@
+../../../net/forwarding/local_termination.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
new file mode 120000
index 000000000000..b9757466bc97
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/dsa/no_forwarding.sh
@@ -0,0 +1 @@
+../../../net/forwarding/no_forwarding.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh
new file mode 100755
index 000000000000..224ca3695c89
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh
@@ -0,0 +1,334 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# In addition to the common variables, the user may set:
+# LC_SLOT - If not set, all probed line cards are going to be tested,
+# with the exception of the "activation_16x100G_test".
+# If set, only the selected line card is going to be used
+# for tests, including "activation_16x100G_test".
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ unprovision_test
+ provision_test
+ activation_16x100G_test
+"
+
+NUM_NETIFS=0
+
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
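+# busywait() predicates: each prints the current value (so the caller
+# can capture the last observed state) and succeeds only once the
+# awaited condition holds.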
+until_lc_state_is()
+{
+ local state=$1; shift
+ local current=$("$@")
+
+ echo "$current"
+ [ "$current" == "$state" ]
+}
+
+until_lc_state_is_not()
+{
+ ! until_lc_state_is "$@"
+}
+
+lc_state_get()
+{
+ local lc=$1
+
+ devlink lc show $DEVLINK_DEV lc $lc -j | jq -e -r ".[][][].state"
+}
+
+lc_wait_until_state_changes()
+{
+ local lc=$1
+ local state=$2
+ local timeout=$3 # ms
+
+ busywait "$timeout" until_lc_state_is_not "$state" lc_state_get "$lc"
+}
+
+lc_wait_until_state_becomes()
+{
+ local lc=$1
+ local state=$2
+ local timeout=$3 # ms
+
+ busywait "$timeout" until_lc_state_is "$state" lc_state_get "$lc"
+}
+
+until_lc_port_count_is()
+{
+ local port_count=$1; shift
+ local current=$("$@")
+
+ echo "$current"
+ [ $current == $port_count ]
+}
+
+lc_port_count_get()
+{
+ local lc=$1
+
+ devlink port -j | jq -e -r ".[][] | select(.lc==$lc) | .port" | wc -l
+}
+
+lc_wait_until_port_count_is()
+{
+ local lc=$1
+ local port_count=$2
+ local timeout=$3 # ms
+
+ busywait "$timeout" until_lc_port_count_is "$port_count" lc_port_count_get "$lc"
+}
+
+lc_nested_devlink_dev_get()
+{
+ local lc=$1
+
+ devlink lc show $DEVLINK_DEV lc $lc -j | jq -e -r ".[][][].nested_devlink"
+}
+
+PROV_UNPROV_TIMEOUT=8000 # ms
+POST_PROV_ACT_TIMEOUT=2000 # ms
+PROV_PORTS_INSTANTIATION_TIMEOUT=15000 # ms
+
+unprovision_one()
+{
+ local lc=$1
+ local state
+
+ state=$(lc_state_get $lc)
+ check_err $? "Failed to get state of linecard $lc"
+ if [[ "$state" == "unprovisioned" ]]; then
+ return
+ fi
+
+ log_info "Unprovisioning linecard $lc"
+
+ devlink lc set $DEVLINK_DEV lc $lc notype
+ check_err $? "Failed to trigger linecard $lc unprovisioning"
+
+ state=$(lc_wait_until_state_changes $lc "unprovisioning" \
+ $PROV_UNPROV_TIMEOUT)
+ check_err $? "Failed to unprovision linecard $lc (timeout)"
+
+ [ "$state" == "unprovisioned" ]
+ check_err $? "Failed to unprovision linecard $lc (state=$state)"
+}
+
+provision_one()
+{
+ local lc=$1
+ local type=$2
+ local state
+
+ log_info "Provisioning linecard $lc"
+
+ devlink lc set $DEVLINK_DEV lc $lc type $type
+ check_err $? "Failed trigger linecard $lc provisioning"
+
+ state=$(lc_wait_until_state_changes $lc "provisioning" \
+ $PROV_UNPROV_TIMEOUT)
+ check_err $? "Failed to provision linecard $lc (timeout)"
+
+ [ "$state" == "provisioned" ] || [ "$state" == "active" ]
+ check_err $? "Failed to provision linecard $lc (state=$state)"
+
+ provisioned_type=$(devlink lc show $DEVLINK_DEV lc $lc -j | jq -e -r ".[][][].type")
+ [ "$provisioned_type" == "$type" ]
+ check_err $? "Wrong provision type returned for linecard $lc (got \"$provisioned_type\", expected \"$type\")"
+
+ # Wait for possible activation to make sure the state
+ # won't change after return from this function.
+ state=$(lc_wait_until_state_becomes $lc "active" \
+ $POST_PROV_ACT_TIMEOUT)
+}
+
+unprovision_test()
+{
+ RET=0
+ local lc
+
+ lc=$LC_SLOT
+ unprovision_one $lc
+ log_test "Unprovision"
+}
+
+LC_16X100G_TYPE="16x100G"
+LC_16X100G_PORT_COUNT=16
+
+supported_types_check()
+{
+ local lc=$1
+ local supported_types_count
+ local type_index
+ local lc_16x100_found=false
+
+ supported_types_count=$(devlink lc show $DEVLINK_DEV lc $lc -j | \
+ jq -e -r ".[][][].supported_types | length")
+ [ $supported_types_count != 0 ]
+ check_err $? "No supported types found for linecard $lc"
+ for (( type_index=0; type_index<$supported_types_count; type_index++ ))
+ do
+ type=$(devlink lc show $DEVLINK_DEV lc $lc -j | \
+ jq -e -r ".[][][].supported_types[$type_index]")
+ if [[ "$type" == "$LC_16X100G_TYPE" ]]; then
+ lc_16x100_found=true
+ break
+ fi
+ done
+ [ $lc_16x100_found = true ]
+ check_err $? "16X100G not found between supported types of linecard $lc"
+}
+
+ports_check()
+{
+ local lc=$1
+ local expected_port_count=$2
+ local port_count
+
+ port_count=$(lc_wait_until_port_count_is $lc $expected_port_count \
+ $PROV_PORTS_INSTANTIATION_TIMEOUT)
+ [ $port_count != 0 ]
+ check_err $? "No port associated with linecard $lc"
+ [ $port_count == $expected_port_count ]
+ check_err $? "Unexpected port count linecard $lc (got $port_count, expected $expected_port_count)"
+}
+
+lc_dev_info_provisioned_check()
+{
+ local lc=$1
+ local nested_devlink_dev=$2
+ local fixed_hw_revision
+ local running_ini_version
+
+ fixed_hw_revision=$(devlink dev info $nested_devlink_dev -j | \
+ jq -e -r '.[][].versions.fixed."hw.revision"')
+ check_err $? "Failed to get linecard $lc fixed.hw.revision"
+ log_info "Linecard $lc fixed.hw.revision: \"$fixed_hw_revision\""
+ running_ini_version=$(devlink dev info $nested_devlink_dev -j | \
+ jq -e -r '.[][].versions.running."ini.version"')
+ check_err $? "Failed to get linecard $lc running.ini.version"
+ log_info "Linecard $lc running.ini.version: \"$running_ini_version\""
+}
+
+provision_test()
+{
+ RET=0
+ local lc
+ local type
+ local state
+ local nested_devlink_dev
+
+ lc=$LC_SLOT
+ supported_types_check $lc
+ state=$(lc_state_get $lc)
+ check_err $? "Failed to get state of linecard $lc"
+ if [[ "$state" != "unprovisioned" ]]; then
+ unprovision_one $lc
+ fi
+ provision_one $lc $LC_16X100G_TYPE
+ ports_check $lc $LC_16X100G_PORT_COUNT
+
+ nested_devlink_dev=$(lc_nested_devlink_dev_get $lc)
+ check_err $? "Failed to get nested devlink handle of linecard $lc"
+ lc_dev_info_provisioned_check $lc $nested_devlink_dev
+
+ log_test "Provision"
+}
+
+ACTIVATION_TIMEOUT=20000 # ms
+
+interface_check()
+{
+ ip link set $h1 up
+ ip link set $h2 up
+ ifaces_upped=true
+ setup_wait
+}
+
+lc_dev_info_active_check()
+{
+ local lc=$1
+ local nested_devlink_dev=$2
+ local fixed_device_fw_psid
+ local running_device_fw
+
+ fixed_device_fw_psid=$(devlink dev info $nested_devlink_dev -j | \
+ jq -e -r ".[][].versions.fixed" | \
+ jq -e -r '."fw.psid"')
+ check_err $? "Failed to get linecard $lc fixed fw PSID"
+ log_info "Linecard $lc fixed.fw.psid: \"$fixed_device_fw_psid\""
+
+ running_device_fw=$(devlink dev info $nested_devlink_dev -j | \
+ jq -e -r ".[][].versions.running.fw")
+ check_err $? "Failed to get linecard $lc running.fw.version"
+ log_info "Linecard $lc running.fw: \"$running_device_fw\""
+}
+
+activation_16x100G_test()
+{
+ RET=0
+ local lc
+ local type
+ local state
+ local nested_devlink_dev
+
+ lc=$LC_SLOT
+ type=$LC_16X100G_TYPE
+
+ unprovision_one $lc
+ provision_one $lc $type
+ state=$(lc_wait_until_state_becomes $lc "active" \
+ $ACTIVATION_TIMEOUT)
+ check_err $? "Failed to get linecard $lc activated (timeout)"
+
+ interface_check
+
+ nested_devlink_dev=$(lc_nested_devlink_dev_get $lc)
+ check_err $? "Failed to get nested devlink handle of linecard $lc"
+ lc_dev_info_active_check $lc $nested_devlink_dev
+
+ log_test "Activation 16x100G"
+}
+
+setup_prepare()
+{
+ local lc_num
+
+ # Assign separately from "local" so that $? reflects the pipeline,
+ # not the "local" builtin.
+ lc_num=$(devlink lc show -j | jq -e -r ".[][\"$DEVLINK_DEV\"] |length")
+ if [[ $? -ne 0 ]] || [[ $lc_num -eq 0 ]]; then
+ echo "SKIP: No linecard support found"
+ exit $ksft_skip
+ fi
+
+ if [ -z "$LC_SLOT" ]; then
+ echo "SKIP: \"LC_SLOT\" variable not provided"
+ exit $ksft_skip
+ fi
+
+	# The interfaces are not present when the script starts. NUM_NETIFS is
+	# therefore defined here, so that the library does not create implicit
+	# dummy veth pairs.
+ NUM_NETIFS=2
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+ ifaces_upped=false
+}
+
+cleanup()
+{
+ if [ "$ifaces_upped" = true ] ; then
+ ip link set $h1 down
+ ip link set $h2 down
+ fi
+}
+
+trap cleanup EXIT
+
+setup_prepare
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_burst.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_burst.sh
new file mode 100755
index 000000000000..82a47b903f92
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_burst.sh
@@ -0,0 +1,480 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test sends 1Gbps of traffic through the switch, into which it then
+# injects a burst of traffic and tests that there are no drops.
+#
+# The 1Gbps stream is created by sending >1Gbps stream from H1. This stream
+# ingresses through $swp1, and is forwarded through a small temporary pool to a
+# 1Gbps $swp3.
+#
+# Thus a 1Gbps stream enters $swp4, and is forwarded through a large pool to
+# $swp2, and eventually to H2. Since $swp2 is a 1Gbps port as well, no backlog
+# is generated.
+#
+# At this point, a burst of traffic is forwarded from H3. This enters $swp5, is
+# forwarded to $swp2, which is fully subscribed by the 1Gbps stream. The
+# expectation is that the burst is wholly absorbed by the large pool and no
+# drops are caused. After the burst, there should be a backlog that is hard to
+# get rid of, because $swp2 is fully subscribed. But because each individual
+# packet is scheduled soon after getting enqueued, SLL and HLL do not impact the
+# test.
+#
+# +-----------------------+ +-----------------------+
+# | H1 | | H3 |
+# | + $h1.111 | | $h3.111 + |
+# | | 192.0.2.33/28 | | 192.0.2.35/28 | |
+# | | | | | |
+# | + $h1 | | $h3 + |
+# +---|-------------------+ +--------------------+ +------------------|----+
+# | | | |
+# +---|----------------------|--------------------|----------------------|----+
+# | + $swp1 $swp3 + + $swp4 $swp5 | |
+# | | iPOOL1 iPOOL0 | | iPOOL2 iPOOL2 | |
+# | | ePOOL4 ePOOL5 | | ePOOL4 ePOOL4 | |
+# | | 1Gbps | | 1Gbps | |
+# | +-|----------------------|-+ +-|----------------------|-+ |
+# | | + $swp1.111 $swp3.111 + | | + $swp4.111 $swp5.111 + | |
+# | | | | | |
+# | | BR1 | | BR2 | |
+# | | | | | |
+# | | | | + $swp2.111 | |
+# | +--------------------------+ +---------|----------------+ |
+# | | |
+# | iPOOL0: 500KB dynamic | |
+# | iPOOL1: 500KB dynamic | |
+# | iPOOL2: 10MB dynamic + $swp2 |
+# | ePOOL4: 500KB dynamic | iPOOL0 |
+# | ePOOL5: 500KB dynamic | ePOOL6 |
+# | ePOOL6: 10MB dynamic | 1Gbps |
+# +-------------------------------------------------------|-------------------+
+# |
+# +---|-------------------+
+# | + $h2 H2 |
+# | | 1Gbps |
+# | | |
+# | + $h2.111 |
+# | 192.0.2.34/28 |
+# +-----------------------+
+#
+# iPOOL0+ePOOL4 are helper pools for control traffic etc.
+# iPOOL1+ePOOL5 are helper pools for modeling the 1Gbps stream
+# iPOOL2+ePOOL6 are pools for soaking the burst traffic
+
+ALL_TESTS="
+ ping_ipv4
+ test_8K
+ test_800
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=8
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+source qos_lib.sh
+source mlxsw_lib.sh
+
+_1KB=1000
+_500KB=$((500 * _1KB))
+_1MB=$((1000 * _1KB))
+
+# The failure mode that this specifically tests is exhaustion of descriptor
+# buffer. The point is to produce a burst that shared buffer should be able
+# to accommodate, but produce it with small enough packets that the machine
+# runs out of the descriptor buffer space with default configuration.
+#
+# The machine therefore needs to be able to produce line rate with as small
+# packets as possible, and at the same time have large enough buffer that
+# when filled with these small packets, it runs out of descriptors.
+# Spectrum-2 is very close, but cannot perform this test. Therefore use
+# Spectrum-3 as a minimum, and permit larger burst size, and therefore
+# larger packets, to reduce spurious failures.
+#
+mlxsw_only_on_spectrum 3+ || exit
+
+BURST_SIZE=$((50000000))
+POOL_SIZE=$BURST_SIZE
+
+h1_create()
+{
+ simple_if_init $h1
+ mtu_set $h1 10000
+
+ vlan_create $h1 111 v$h1 192.0.2.33/28
+ ip link set dev $h1.111 type vlan egress-qos-map 0:1
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 111
+
+ mtu_restore $h1
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ mtu_set $h2 10000
+ ethtool -s $h2 speed 1000 autoneg off
+
+ vlan_create $h2 111 v$h2 192.0.2.34/28
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 111
+
+ ethtool -s $h2 autoneg on
+ mtu_restore $h2
+ simple_if_fini $h2
+}
+
+h3_create()
+{
+ simple_if_init $h3
+ mtu_set $h3 10000
+
+ vlan_create $h3 111 v$h3 192.0.2.35/28
+}
+
+h3_destroy()
+{
+ vlan_destroy $h3 111
+
+ mtu_restore $h3
+ simple_if_fini $h3
+}
+
+switch_create()
+{
+ # pools
+ # -----
+
+ devlink_pool_size_thtype_save 0
+ devlink_pool_size_thtype_save 4
+ devlink_pool_size_thtype_save 1
+ devlink_pool_size_thtype_save 5
+ devlink_pool_size_thtype_save 2
+ devlink_pool_size_thtype_save 6
+
+ devlink_port_pool_th_save $swp1 1
+ devlink_port_pool_th_save $swp2 6
+ devlink_port_pool_th_save $swp3 5
+ devlink_port_pool_th_save $swp4 2
+ devlink_port_pool_th_save $swp5 2
+
+ devlink_tc_bind_pool_th_save $swp1 1 ingress
+ devlink_tc_bind_pool_th_save $swp2 1 egress
+ devlink_tc_bind_pool_th_save $swp3 1 egress
+ devlink_tc_bind_pool_th_save $swp4 1 ingress
+ devlink_tc_bind_pool_th_save $swp5 1 ingress
+
+ # Control traffic pools. Just reduce the size.
+ devlink_pool_size_thtype_set 0 dynamic $_500KB
+ devlink_pool_size_thtype_set 4 dynamic $_500KB
+
+ # Stream modeling pools.
+ devlink_pool_size_thtype_set 1 dynamic $_500KB
+ devlink_pool_size_thtype_set 5 dynamic $_500KB
+
+ # Burst soak pools.
+ devlink_pool_size_thtype_set 2 static $POOL_SIZE
+ devlink_pool_size_thtype_set 6 static $POOL_SIZE
+
+ # $swp1
+ # -----
+
+ ip link set dev $swp1 up
+ mtu_set $swp1 10000
+ vlan_create $swp1 111
+ ip link set dev $swp1.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp1 1 16
+ devlink_tc_bind_pool_th_set $swp1 1 ingress 1 16
+
+ # Configure qdisc...
+ tc qdisc replace dev $swp1 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ # ... so that we can assign prio1 traffic to PG1.
+ dcb buffer set dev $swp1 prio-buffer all:0 1:1
+
+ # $swp2
+ # -----
+
+ ip link set dev $swp2 up
+ mtu_set $swp2 10000
+ ethtool -s $swp2 speed 1000 autoneg off
+ vlan_create $swp2 111
+ ip link set dev $swp2.111 type vlan egress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp2 6 $POOL_SIZE
+ devlink_tc_bind_pool_th_set $swp2 1 egress 6 $POOL_SIZE
+
+ # prio 0->TC0 (band 7), 1->TC1 (band 6)
+ tc qdisc replace dev $swp2 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+
+ # $swp3
+ # -----
+
+ ip link set dev $swp3 up
+ mtu_set $swp3 10000
+ ethtool -s $swp3 speed 1000 autoneg off
+ vlan_create $swp3 111
+ ip link set dev $swp3.111 type vlan egress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp3 5 16
+ devlink_tc_bind_pool_th_set $swp3 1 egress 5 16
+
+ # prio 0->TC0 (band 7), 1->TC1 (band 6)
+ tc qdisc replace dev $swp3 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+
+ # $swp4
+ # -----
+
+ ip link set dev $swp4 up
+ mtu_set $swp4 10000
+ ethtool -s $swp4 speed 1000 autoneg off
+ vlan_create $swp4 111
+ ip link set dev $swp4.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp4 2 $POOL_SIZE
+ devlink_tc_bind_pool_th_set $swp4 1 ingress 2 $POOL_SIZE
+
+ # Configure qdisc...
+ tc qdisc replace dev $swp4 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ # ... so that we can assign prio1 traffic to PG1.
+ dcb buffer set dev $swp4 prio-buffer all:0 1:1
+
+ # $swp5
+ # -----
+
+ ip link set dev $swp5 up
+ mtu_set $swp5 10000
+ vlan_create $swp5 111
+ ip link set dev $swp5.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp5 2 $POOL_SIZE
+ devlink_tc_bind_pool_th_set $swp5 1 ingress 2 $POOL_SIZE
+
+ # Configure qdisc...
+ tc qdisc replace dev $swp5 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ # ... so that we can assign prio1 traffic to PG1.
+ dcb buffer set dev $swp5 prio-buffer all:0 1:1
+
+ # bridges
+ # -------
+
+ ip link add name br1 type bridge vlan_filtering 0
+ ip link set dev $swp1.111 master br1
+ ip link set dev $swp3.111 master br1
+ ip link set dev br1 up
+
+ ip link add name br2 type bridge vlan_filtering 0
+ ip link set dev $swp2.111 master br2
+ ip link set dev $swp4.111 master br2
+ ip link set dev $swp5.111 master br2
+ ip link set dev br2 up
+}
+
+switch_destroy()
+{
+ # Do this first so that we can reset the limits to values that are only
+ # valid for the original static / dynamic setting.
+ devlink_pool_size_thtype_restore 6
+ devlink_pool_size_thtype_restore 5
+ devlink_pool_size_thtype_restore 4
+ devlink_pool_size_thtype_restore 2
+ devlink_pool_size_thtype_restore 1
+ devlink_pool_size_thtype_restore 0
+
+ # bridges
+ # -------
+
+ ip link set dev br2 down
+ ip link set dev $swp5.111 nomaster
+ ip link set dev $swp4.111 nomaster
+ ip link set dev $swp2.111 nomaster
+ ip link del dev br2
+
+ ip link set dev br1 down
+ ip link set dev $swp3.111 nomaster
+ ip link set dev $swp1.111 nomaster
+ ip link del dev br1
+
+ # $swp5
+ # -----
+
+ dcb buffer set dev $swp5 prio-buffer all:0
+ tc qdisc del dev $swp5 root
+
+ devlink_tc_bind_pool_th_restore $swp5 1 ingress
+ devlink_port_pool_th_restore $swp5 2
+
+ vlan_destroy $swp5 111
+ mtu_restore $swp5
+ ip link set dev $swp5 down
+
+ # $swp4
+ # -----
+
+ dcb buffer set dev $swp4 prio-buffer all:0
+ tc qdisc del dev $swp4 root
+
+ devlink_tc_bind_pool_th_restore $swp4 1 ingress
+ devlink_port_pool_th_restore $swp4 2
+
+ vlan_destroy $swp4 111
+ ethtool -s $swp4 autoneg on
+ mtu_restore $swp4
+ ip link set dev $swp4 down
+
+ # $swp3
+ # -----
+
+ tc qdisc del dev $swp3 root
+
+ devlink_tc_bind_pool_th_restore $swp3 1 egress
+ devlink_port_pool_th_restore $swp3 5
+
+ vlan_destroy $swp3 111
+ ethtool -s $swp3 autoneg on
+ mtu_restore $swp3
+ ip link set dev $swp3 down
+
+ # $swp2
+ # -----
+
+ tc qdisc del dev $swp2 root
+
+ devlink_tc_bind_pool_th_restore $swp2 1 egress
+ devlink_port_pool_th_restore $swp2 6
+
+ vlan_destroy $swp2 111
+ ethtool -s $swp2 autoneg on
+ mtu_restore $swp2
+ ip link set dev $swp2 down
+
+ # $swp1
+ # -----
+
+ dcb buffer set dev $swp1 prio-buffer all:0
+ tc qdisc del dev $swp1 root
+
+ devlink_tc_bind_pool_th_restore $swp1 1 ingress
+ devlink_port_pool_th_restore $swp1 1
+
+ vlan_destroy $swp1 111
+ mtu_restore $swp1
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ swp4=${NETIFS[p6]}
+
+ swp5=${NETIFS[p7]}
+ h3=${NETIFS[p8]}
+
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.34 " h1->h2"
+ ping_test $h3 192.0.2.34 " h3->h2"
+}
+
+__test_qos_burst()
+{
+ local pktsize=$1; shift
+
+ RET=0
+
+ start_traffic_pktsize $pktsize $h1.111 192.0.2.33 192.0.2.34 $h2mac
+ sleep 1
+
+ local q0=$(ethtool_stats_get $swp2 tc_transmit_queue_tc_1)
+ ((q0 == 0))
+ check_err $? "Transmit queue non-zero?"
+
+ local d0=$(ethtool_stats_get $swp2 tc_no_buffer_discard_uc_tc_1)
+
+ local cell_size=$(devlink_cell_size_get)
+ local cells=$((BURST_SIZE / cell_size))
+ # Each packet is $pktsize of payload + headers.
+ local pkt_cells=$(((pktsize + 50 + cell_size - 1) / cell_size))
+ # How many packets can we admit:
+ local pkts=$((cells / pkt_cells))
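+	# For illustration, with a 144-B cell the 50 MB burst is ~347K cells;
+	# an 800-B payload plus 50 B of headers occupies 6 cells, so roughly
+	# 57.8K packets would fit.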
+
+ $MZ $h3 -p $pktsize -Q 1:111 -A 192.0.2.35 -B 192.0.2.34 \
+ -a own -b $h2mac -c $pkts -t udp -q
+ sleep 1
+
+ local d1=$(ethtool_stats_get $swp2 tc_no_buffer_discard_uc_tc_1)
+ ((d1 == d0))
+ check_err $? "Drops seen on egress port: $d0 -> $d1 ($((d1 - d0)))"
+
+	# Check that the queue size is reasonably close to the burst size. This
+	# makes sure that the lack of drops above was not due to the port being
+	# undersubscribed.
+ local q0=$(ethtool_stats_get $swp2 tc_transmit_queue_tc_1)
+ local qe=$((90 * BURST_SIZE / 100))
+ ((q0 > qe))
+ check_err $? "Queue size expected >$qe, got $q0"
+
+ stop_traffic
+ sleep 2
+
+ log_test "Burst: absorb $pkts ${pktsize}-B packets"
+}
+
+test_8K()
+{
+ __test_qos_burst 8000
+}
+
+test_800()
+{
+ __test_qos_burst 800
+}
+
+bail_on_lldpad
+
+trap cleanup EXIT
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
index f4493ef9cca1..3569ff45f7d5 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
@@ -371,9 +371,9 @@ test_tc_int_buf()
tc qdisc delete dev $swp root
}
-trap cleanup EXIT
-
bail_on_lldpad
+
+trap cleanup EXIT
setup_wait
tests_run
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
index 5d5622fc2758..f9858e221996 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
@@ -393,9 +393,9 @@ test_qos_pfc()
log_test "PFC"
}
-trap cleanup EXIT
-
bail_on_lldpad
+
+trap cleanup EXIT
setup_prepare
setup_wait
tests_run
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_counter_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_counter_scale.sh
new file mode 100644
index 000000000000..a43a9926e690
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/rif_counter_scale.sh
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: GPL-2.0
+
+RIF_COUNTER_NUM_NETIFS=2
+
+rif_counter_addr4()
+{
+ local i=$1; shift
+ local p=$1; shift
+
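+	# Each index gets its own /30 subnet, e.g. i=5, p=2 -> 192.0.0.22.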
+ printf 192.0.%d.%d $((i / 64)) $(((4 * i % 256) + p))
+}
+
+rif_counter_addr4pfx()
+{
+ rif_counter_addr4 $@
+ printf /30
+}
+
+rif_counter_h1_create()
+{
+ simple_if_init $h1
+}
+
+rif_counter_h1_destroy()
+{
+ simple_if_fini $h1
+}
+
+rif_counter_h2_create()
+{
+ simple_if_init $h2
+}
+
+rif_counter_h2_destroy()
+{
+ simple_if_fini $h2
+}
+
+rif_counter_setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ h2=${NETIFS[p2]}
+
+ vrf_prepare
+
+ rif_counter_h1_create
+ rif_counter_h2_create
+}
+
+rif_counter_cleanup()
+{
+ local count=$1; shift
+
+ pre_cleanup
+
+ for ((i = 1; i <= count; i++)); do
+ vlan_destroy $h2 $i
+ done
+
+ rif_counter_h2_destroy
+ rif_counter_h1_destroy
+
+ vrf_cleanup
+
+ if [[ -v RIF_COUNTER_BATCH_FILE ]]; then
+ rm -f $RIF_COUNTER_BATCH_FILE
+ fi
+}
+
+
+rif_counter_test()
+{
+ local count=$1; shift
+ local should_fail=$1; shift
+
+ RIF_COUNTER_BATCH_FILE="$(mktemp)"
+
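+	# Create one VLAN upper per requested counter, then enable l3_stats on
+	# all of them in a single "ip -b" batch, so a single failed enablement
+	# fails the whole batch.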
+ for ((i = 1; i <= count; i++)); do
+ vlan_create $h2 $i v$h2 $(rif_counter_addr4pfx $i 2)
+ done
+ for ((i = 1; i <= count; i++)); do
+ cat >> $RIF_COUNTER_BATCH_FILE <<-EOF
+ stats set dev $h2.$i l3_stats on
+ EOF
+ done
+
+ ip -b $RIF_COUNTER_BATCH_FILE
+ check_err_fail $should_fail $? "RIF counter enablement"
+}
+
+rif_counter_traffic_test()
+{
+ local count=$1; shift
+ local i;
+
+ for ((i = count; i > 0; i /= 2)); do
+ $MZ $h1 -Q $i -c 1 -d 20msec -p 100 -a own -b $(mac_get $h2) \
+ -A $(rif_counter_addr4 $i 1) \
+ -B $(rif_counter_addr4 $i 2) \
+ -q -t udp sp=54321,dp=12345
+ done
+ for ((i = count; i > 0; i /= 2)); do
+ busywait "$TC_HIT_TIMEOUT" until_counter_is "== 1" \
+ hw_stats_get l3_stats $h2.$i rx packets > /dev/null
+ check_err $? "Traffic not seen at RIF $h2.$i"
+ done
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
index 1e5ad3209436..7a73057206cd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
@@ -166,12 +166,11 @@ ecn_mirror_test()
uninstall_qdisc
}
-trap cleanup EXIT
+bail_on_lldpad
+trap cleanup EXIT
setup_prepare
setup_wait
-
-bail_on_lldpad
tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
index d79a82f317d2..501d192529ac 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
@@ -73,12 +73,11 @@ red_mirror_test()
uninstall_qdisc
}
-trap cleanup EXIT
+bail_on_lldpad
+trap cleanup EXIT
setup_prepare
setup_wait
-
-bail_on_lldpad
tests_run
exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
index e9f65bd2e299..688338bbeb97 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
@@ -25,7 +25,16 @@ cleanup()
trap cleanup EXIT
-ALL_TESTS="router tc_flower mirror_gre tc_police port rif_mac_profile"
+ALL_TESTS="
+ router
+ tc_flower
+ mirror_gre
+ tc_police
+ port
+ rif_mac_profile
+ rif_counter
+"
+
for current_test in ${TESTS:-$ALL_TESTS}; do
RET_FIN=0
source ${current_test}_scale.sh
@@ -36,16 +45,32 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
for should_fail in 0 1; do
RET=0
target=$(${current_test}_get_target "$should_fail")
+ if ((target == 0)); then
+ log_test_skip "'$current_test' should_fail=$should_fail test"
+ continue
+ fi
+
${current_test}_setup_prepare
setup_wait $num_netifs
+ # Update target in case occupancy of a certain resource changed
+ # following the test setup.
+ target=$(${current_test}_get_target "$should_fail")
${current_test}_test "$target" "$should_fail"
- ${current_test}_cleanup
- devlink_reload
if [[ "$should_fail" -eq 0 ]]; then
log_test "'$current_test' $target"
+
+ if ((!RET)); then
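+				# Run the optional traffic test if the sourced
+				# ${current_test}_scale.sh defines one.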
+ tt=${current_test}_traffic_test
+ if [[ $(type -t $tt) == "function" ]]; then
+ $tt "$target"
+ log_test "'$current_test' $target traffic test"
+ fi
+ fi
else
log_test "'$current_test' overflow $target"
fi
+ ${current_test}_cleanup $target
+ devlink_reload
RET_FIN=$(( RET_FIN || RET ))
done
done
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_counter_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_counter_scale.sh
new file mode 120000
index 000000000000..1f5752e8ffc0
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_counter_scale.sh
@@ -0,0 +1 @@
+../spectrum/rif_counter_scale.sh \ No newline at end of file
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
index efd798a85931..4444bbace1a9 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh
@@ -4,17 +4,22 @@ source ../tc_flower_scale.sh
tc_flower_get_target()
{
local should_fail=$1; shift
+ local max_cnts
# The driver associates a counter with each tc filter, which means the
# number of supported filters is bounded by the number of available
# counters.
- # Currently, the driver supports 30K (30,720) flow counters and six of
- # these are used for multicast routing.
- local target=30714
+ max_cnts=$(devlink_resource_size_get counters flow)
+
+ # Remove already allocated counters.
+ ((max_cnts -= $(devlink_resource_occ_get counters flow)))
+
+ # Each rule uses two counters, for packets and bytes.
+ ((max_cnts /= 2))
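+	# (E.g. a 30,720-counter pool with no counters in use yields a target
+	# of 15,360 rules.)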
if ((! should_fail)); then
- echo $target
+ echo $max_cnts
else
- echo $((target + 1))
+ echo $((max_cnts + 1))
fi
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index dea33dc93790..95d9f710a630 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -22,7 +22,16 @@ cleanup()
devlink_sp_read_kvd_defaults
trap cleanup EXIT
-ALL_TESTS="router tc_flower mirror_gre tc_police port rif_mac_profile"
+ALL_TESTS="
+ router
+ tc_flower
+ mirror_gre
+ tc_police
+ port
+ rif_mac_profile
+ rif_counter
+"
+
for current_test in ${TESTS:-$ALL_TESTS}; do
RET_FIN=0
source ${current_test}_scale.sh
@@ -41,15 +50,31 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
for should_fail in 0 1; do
RET=0
target=$(${current_test}_get_target "$should_fail")
+ if ((target == 0)); then
+ log_test_skip "'$current_test' [$profile] should_fail=$should_fail test"
+ continue
+ fi
${current_test}_setup_prepare
setup_wait $num_netifs
+ # Update target in case occupancy of a certain resource
+ # changed following the test setup.
+ target=$(${current_test}_get_target "$should_fail")
${current_test}_test "$target" "$should_fail"
- ${current_test}_cleanup
if [[ "$should_fail" -eq 0 ]]; then
log_test "'$current_test' [$profile] $target"
+
+ if ((!RET)); then
+ tt=${current_test}_traffic_test
+				if [[ $(type -t $tt) == "function" ]]; then
+ $tt "$target"
+ log_test "'$current_test' [$profile] $target traffic test"
+ fi
+ fi
else
log_test "'$current_test' [$profile] overflow $target"
fi
+ ${current_test}_cleanup $target
RET_FIN=$(( RET_FIN || RET ))
done
done
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_counter_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_counter_scale.sh
new file mode 100644
index 000000000000..d44536276e8a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_counter_scale.sh
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../rif_counter_scale.sh
+
+rif_counter_get_target()
+{
+ local should_fail=$1; shift
+ local max_cnts
+ local max_rifs
+ local target
+
+ max_rifs=$(devlink_resource_size_get rifs)
+ max_cnts=$(devlink_resource_size_get counters rif)
+
+ # Remove already allocated RIFs.
+ ((max_rifs -= $(devlink_resource_occ_get rifs)))
+
+ # 10 KVD slots per counter, ingress+egress counters per RIF
+ ((max_cnts /= 20))
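+	# (E.g. a pool of 20,000 counter slots supports at most 1,000 RIFs.)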
+
+ # Pointless to run the overflow test if we don't have enough RIFs to
+ # host all the counters.
+ if ((max_cnts > max_rifs && should_fail)); then
+ echo 0
+ return
+ fi
+
+ target=$((max_rifs < max_cnts ? max_rifs : max_cnts))
+
+ if ((! should_fail)); then
+ echo $target
+ else
+ echo $((target + 1))
+ fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
index aa74be9f47c8..d3d9e60d6ddf 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
@@ -77,6 +77,7 @@ tc_flower_rules_create()
filter add dev $h2 ingress \
prot ipv6 \
pref 1000 \
+ handle 42$i \
flower $tcflags dst_ip $(tc_flower_addr $i) \
action drop
EOF
@@ -121,3 +122,19 @@ tc_flower_test()
tcflags="skip_sw"
__tc_flower_test $count $should_fail
}
+
+tc_flower_traffic_test()
+{
+ local count=$1; shift
+ local i;
+
+ for ((i = count - 1; i > 0; i /= 2)); do
+ $MZ -6 $h1 -c 1 -d 20msec -p 100 -a own -b $(mac_get $h2) \
+ -A $(tc_flower_addr 0) -B $(tc_flower_addr $i) \
+ -q -t udp sp=54321,dp=12345
+ done
+ for ((i = count - 1; i > 0; i /= 2)); do
+ tc_check_packets "dev $h2 ingress" 42$i 1
+ check_err $? "Traffic not seen at rule #$i"
+ done
+}
diff --git a/tools/testing/selftests/drivers/net/netdevsim/fib.sh b/tools/testing/selftests/drivers/net/netdevsim/fib.sh
index fc794cd30389..6800de816e8b 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/fib.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/fib.sh
@@ -16,6 +16,7 @@ ALL_TESTS="
ipv4_replay
ipv4_flush
ipv4_error_path
+ ipv4_delete_fail
ipv6_add
ipv6_metric
ipv6_append_single
@@ -29,11 +30,13 @@ ALL_TESTS="
ipv6_replay_single
ipv6_replay_multipath
ipv6_error_path
+ ipv6_delete_fail
"
NETDEVSIM_PATH=/sys/bus/netdevsim/
DEV_ADDR=1337
DEV=netdevsim${DEV_ADDR}
SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV/net/
+DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV/
NUM_NETIFS=0
source $lib_dir/lib.sh
source $lib_dir/fib_offload_lib.sh
@@ -157,6 +160,27 @@ ipv4_error_path()
ipv4_error_path_replay
}
+ipv4_delete_fail()
+{
+ RET=0
+
+ echo "y" > $DEBUGFS_DIR/fib/fail_route_delete
+
+ ip -n testns1 link add name dummy1 type dummy
+ ip -n testns1 link set dev dummy1 up
+
+ ip -n testns1 route add 192.0.2.0/24 dev dummy1
+ ip -n testns1 route del 192.0.2.0/24 dev dummy1 &> /dev/null
+
+ # We should not be able to delete the netdev if we are leaking a
+ # reference.
+ ip -n testns1 link del dev dummy1
+
+ log_test "IPv4 route delete failure"
+
+ echo "n" > $DEBUGFS_DIR/fib/fail_route_delete
+}
+
ipv6_add()
{
fib_ipv6_add_test "testns1"
@@ -304,6 +328,27 @@ ipv6_error_path()
ipv6_error_path_replay
}
+ipv6_delete_fail()
+{
+ RET=0
+
+ echo "y" > $DEBUGFS_DIR/fib/fail_route_delete
+
+ ip -n testns1 link add name dummy1 type dummy
+ ip -n testns1 link set dev dummy1 up
+
+ ip -n testns1 route add 2001:db8:1::/64 dev dummy1
+ ip -n testns1 route del 2001:db8:1::/64 dev dummy1 &> /dev/null
+
+ # We should not be able to delete the netdev if we are leaking a
+ # reference.
+ ip -n testns1 link del dev dummy1
+
+ log_test "IPv6 route delete failure"
+
+ echo "n" > $DEBUGFS_DIR/fib/fail_route_delete
+}
+
fib_notify_on_flag_change_set()
{
local notify=$1; shift
diff --git a/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh b/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh
index fe1898402987..cba5ac08426b 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh
@@ -319,11 +319,11 @@ counter_test()
((pkts < 10))
check_err $? "$type stats show >= 10 packets after first enablement"
- sleep 2
+ sleep 2.5
local pkts=$(get_hwstat dummy1 l3 rx.packets)
((pkts >= 20))
- check_err $? "$type stats show < 20 packets after 2s passed"
+ check_err $? "$type stats show < 20 packets after 2.5s passed"
$IP stats set dev dummy1 ${type}_stats off
diff --git a/tools/testing/selftests/drivers/net/ocelot/basic_qos.sh b/tools/testing/selftests/drivers/net/ocelot/basic_qos.sh
new file mode 100755
index 000000000000..c51c83421c61
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/ocelot/basic_qos.sh
@@ -0,0 +1,253 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2022 NXP
+
+# The script is mostly generic, with the exception of the
+# ethtool per-TC counter names ("rx_green_prio_${tc}")
+
+WAIT_TIME=1
+NUM_NETIFS=4
+STABLE_MAC_ADDRS=yes
+NETIF_CREATE=no
+lib_dir=$(dirname $0)/../../../net/forwarding
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+require_command dcb
+
+h1=${NETIFS[p1]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p3]}
+h2=${NETIFS[p4]}
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h1_vlan_create()
+{
+ local vid=$1
+
+ vlan_create $h1 $vid
+ simple_if_init $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+ ip link set $h1.$vid type vlan \
+ egress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 \
+ ingress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+}
+
+h1_vlan_destroy()
+{
+ local vid=$1
+
+ simple_if_fini $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+ vlan_destroy $h1 $vid
+}
+
+h2_vlan_create()
+{
+ local vid=$1
+
+ vlan_create $h2 $vid
+ simple_if_init $h2.$vid $H2_IPV4/24 $H2_IPV6/64
+ ip link set $h2.$vid type vlan \
+ egress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 \
+ ingress-qos-map 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7
+}
+
+h2_vlan_destroy()
+{
+ local vid=$1
+
+ simple_if_fini $h2.$vid $H2_IPV4/24 $H2_IPV6/64
+ vlan_destroy $h2 $vid
+}
+
+vlans_prepare()
+{
+ h1_vlan_create 100
+ h2_vlan_create 100
+
+ tc qdisc add dev ${h1}.100 clsact
+ tc filter add dev ${h1}.100 egress protocol ipv4 \
+ flower ip_proto icmp action skbedit priority 3
+ tc filter add dev ${h1}.100 egress protocol ipv6 \
+ flower ip_proto icmpv6 action skbedit priority 3
+}
+
+vlans_destroy()
+{
+ tc qdisc del dev ${h1}.100 clsact
+
+ h1_vlan_destroy 100
+ h2_vlan_destroy 100
+}
+
+switch_create()
+{
+ ip link set ${swp1} up
+ ip link set ${swp2} up
+
+ # Ports should trust VLAN PCP even with vlan_filtering=0
+ ip link add br0 type bridge
+ ip link set ${swp1} master br0
+ ip link set ${swp2} master br0
+ ip link set br0 up
+}
+
+switch_destroy()
+{
+ ip link del br0
+}
+
+setup_prepare()
+{
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+ switch_destroy
+
+ vrf_cleanup
+}
+
+dscp_cs_to_tos()
+{
+ local dscp_cs=$1
+
+ # https://datatracker.ietf.org/doc/html/rfc2474
+ # 4.2.2.1 The Class Selector Codepoints
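+	# (CS<n> is DSCP <n> << 3 and the tos byte is DSCP << 2, hence the
+	# shift by 5; e.g. CS4 -> tos 128.)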
+ echo $((${dscp_cs} << 5))
+}
+
+run_test()
+{
+ local test_name=$1; shift
+ local if_name=$1; shift
+ local tc=$1; shift
+ local tos=$1; shift
+ local counter_name="rx_green_prio_${tc}"
+ local ipv4_before
+ local ipv4_after
+ local ipv6_before
+ local ipv6_after
+
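+	# Every ping should be classified to TC ${tc} on ingress; require at
+	# least PING_COUNT hits on the corresponding per-TC counter.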
+ ipv4_before=$(ethtool_stats_get ${swp1} "${counter_name}")
+ ping_do ${if_name} $H2_IPV4 "-Q ${tos}"
+ ipv4_after=$(ethtool_stats_get ${swp1} "${counter_name}")
+
+ if [ $((${ipv4_after} - ${ipv4_before})) -lt ${PING_COUNT} ]; then
+ RET=1
+ else
+ RET=0
+ fi
+ log_test "IPv4 ${test_name}"
+
+ ipv6_before=$(ethtool_stats_get ${swp1} "${counter_name}")
+ ping_do ${if_name} $H2_IPV6 "-Q ${tos}"
+ ipv6_after=$(ethtool_stats_get ${swp1} "${counter_name}")
+
+ if [ $((${ipv6_after} - ${ipv6_before})) -lt ${PING_COUNT} ]; then
+ RET=1
+ else
+ RET=0
+ fi
+ log_test "IPv6 ${test_name}"
+}
+
+port_default_prio_get()
+{
+ local if_name=$1
+ local prio
+
+ prio="$(dcb -j app show dev ${if_name} default-prio | \
+ jq '.default_prio[]')"
+ if [ -z "${prio}" ]; then
+ prio=0
+ fi
+
+ echo ${prio}
+}
+
+test_port_default()
+{
+ local orig=$(port_default_prio_get ${swp1})
+ local dmac=$(mac_get ${h2})
+
+ dcb app replace dev ${swp1} default-prio 5
+
+ run_test "Port-default QoS classification" ${h1} 5 0
+
+ dcb app replace dev ${swp1} default-prio ${orig}
+}
+
+test_vlan_pcp()
+{
+ vlans_prepare
+
+ run_test "Trusted VLAN PCP QoS classification" ${h1}.100 3 0
+
+ vlans_destroy
+}
+
+test_ip_dscp()
+{
+ local port_default=$(port_default_prio_get ${swp1})
+ local tos=$(dscp_cs_to_tos 4)
+
+ dcb app add dev ${swp1} dscp-prio CS4:4
+ run_test "Trusted DSCP QoS classification" ${h1} 4 ${tos}
+ dcb app del dev ${swp1} dscp-prio CS4:4
+
+ vlans_prepare
+ run_test "Untrusted DSCP QoS classification follows VLAN PCP" \
+ ${h1}.100 3 ${tos}
+ vlans_destroy
+
+ run_test "Untrusted DSCP QoS classification follows port default" \
+ ${h1} ${port_default} ${tos}
+}
+
+trap cleanup EXIT
+
+ALL_TESTS="
+ test_port_default
+ test_vlan_pcp
+ test_ip_dscp
+"
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/ocelot/psfp.sh b/tools/testing/selftests/drivers/net/ocelot/psfp.sh
new file mode 100755
index 000000000000..5a5cee92c665
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/ocelot/psfp.sh
@@ -0,0 +1,327 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2021-2022 NXP
+
+# Note: on LS1028A, for lack of enough user ports, this setup requires
+# patching the device tree to use the second CPU port as a user port.
+
+WAIT_TIME=1
+NUM_NETIFS=4
+STABLE_MAC_ADDRS=yes
+NETIF_CREATE=no
+lib_dir=$(dirname $0)/../../../net/forwarding
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+source $lib_dir/tsn_lib.sh
+
+UDS_ADDRESS_H1="/var/run/ptp4l_h1"
+UDS_ADDRESS_SWP1="/var/run/ptp4l_swp1"
+
+# Tunables
+NUM_PKTS=1000
+STREAM_VID=100
+STREAM_PRIO=6
+# Use a conservative cycle of 10 ms so that the test still passes when the
+# kernel has some extra overhead, e.g. from lockdep.
+CYCLE_TIME_NS=10000000
+# Create two Gate Control List entries, one OPEN and one CLOSE, of equal
+# durations
+GATE_DURATION_NS=$((${CYCLE_TIME_NS} / 2))
+# Give 2/3 of the cycle time to user space and 1/3 to the kernel
+FUDGE_FACTOR=$((${CYCLE_TIME_NS} / 3))
+# Shift the isochron base time by half the gate time, so that packets are
+# always received by swp1 close to the middle of the time slot, to minimize
+# inaccuracies due to network sync
+SHIFT_TIME_NS=$((${GATE_DURATION_NS} / 2))
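+# With the defaults above, each 10 ms cycle is thus 5 ms OPEN + 5 ms CLOSE,
+# with a ~3.3 ms fudge factor and a 2.5 ms base-time shift.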
+
+h1=${NETIFS[p1]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p3]}
+h2=${NETIFS[p4]}
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+# Chain number exported by the ocelot driver for
+# Per-Stream Filtering and Policing filters
+PSFP()
+{
+ echo 30000
+}
+
+psfp_chain_create()
+{
+ local if_name=$1
+
+ tc qdisc add dev $if_name clsact
+
+ tc filter add dev $if_name ingress chain 0 pref 49152 flower \
+ skip_sw action goto chain $(PSFP)
+}
+
+psfp_chain_destroy()
+{
+ local if_name=$1
+
+ tc qdisc del dev $if_name clsact
+}
+
+psfp_filter_check()
+{
+ local expected=$1
+ local packets=""
+ local drops=""
+ local stats=""
+
+ stats=$(tc -j -s filter show dev ${swp1} ingress chain $(PSFP) pref 1)
+ packets=$(echo ${stats} | jq ".[1].options.actions[].stats.packets")
+ drops=$(echo ${stats} | jq ".[1].options.actions[].stats.drops")
+
+ if ! [ "${packets}" = "${expected}" ]; then
+ printf "Expected filter to match on %d packets but matched on %d instead\n" \
+ "${expected}" "${packets}"
+ fi
+
+ echo "Hardware filter reports ${drops} drops"
+}
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+switch_create()
+{
+ local h2_mac_addr=$(mac_get $h2)
+
+ ip link set ${swp1} up
+ ip link set ${swp2} up
+
+ ip link add br0 type bridge vlan_filtering 1
+ ip link set ${swp1} master br0
+ ip link set ${swp2} master br0
+ ip link set br0 up
+
+ bridge vlan add dev ${swp2} vid ${STREAM_VID}
+ bridge vlan add dev ${swp1} vid ${STREAM_VID}
+ # PSFP on Ocelot requires the filter to also be added to the bridge
+ # FDB, and not be removed
+ bridge fdb add dev ${swp2} \
+ ${h2_mac_addr} vlan ${STREAM_VID} static master
+
+ psfp_chain_create ${swp1}
+
+ tc filter add dev ${swp1} ingress chain $(PSFP) pref 1 \
+ protocol 802.1Q flower skip_sw \
+ dst_mac ${h2_mac_addr} vlan_id ${STREAM_VID} \
+ action gate base-time 0.000000000 \
+ sched-entry OPEN ${GATE_DURATION_NS} -1 -1 \
+ sched-entry CLOSE ${GATE_DURATION_NS} -1 -1
+}
+
+switch_destroy()
+{
+ psfp_chain_destroy ${swp1}
+ ip link del br0
+}
+
+txtime_setup()
+{
+ local if_name=$1
+
+ tc qdisc add dev ${if_name} clsact
+ # Classify PTP on TC 7 and isochron on TC 6
+ tc filter add dev ${if_name} egress protocol 0x88f7 \
+ flower action skbedit priority 7
+ tc filter add dev ${if_name} egress protocol 802.1Q \
+ flower vlan_ethtype 0xdead action skbedit priority 6
+ tc qdisc add dev ${if_name} handle 100: parent root mqprio num_tc 8 \
+ queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
+ map 0 1 2 3 4 5 6 7 \
+ hw 1
+ # Set up TC 6 for SO_TXTIME. tc-mqprio queues count from 1.
+ tc qdisc replace dev ${if_name} parent 100:$((${STREAM_PRIO} + 1)) etf \
+ clockid CLOCK_TAI offload delta ${FUDGE_FACTOR}
+}
+
+txtime_cleanup()
+{
+ local if_name=$1
+
+ tc qdisc del dev ${if_name} root
+ tc qdisc del dev ${if_name} clsact
+}
+
+setup_prepare()
+{
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+
+ txtime_setup ${h1}
+
+ # Set up swp1 as a master PHC for h1, synchronized to the local
+ # CLOCK_REALTIME.
+ phc2sys_start ${swp1} ${UDS_ADDRESS_SWP1}
+
+ # Assumption true for LS1028A: h1 and h2 use the same PHC. So by
+ # synchronizing h1 to swp1 via PTP, h2 is also implicitly synchronized
+ # to swp1 (and both to CLOCK_REALTIME).
+ ptp4l_start ${h1} true ${UDS_ADDRESS_H1}
+ ptp4l_start ${swp1} false ${UDS_ADDRESS_SWP1}
+
+ # Make sure there are no filter matches at the beginning of the test
+ psfp_filter_check 0
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ptp4l_stop ${swp1}
+ ptp4l_stop ${h1}
+ phc2sys_stop
+ isochron_recv_stop
+
+ txtime_cleanup ${h1}
+
+ h2_destroy
+ h1_destroy
+ switch_destroy
+
+ vrf_cleanup
+}
+
+debug_incorrectly_dropped_packets()
+{
+ local isochron_dat=$1
+ local dropped_seqids
+ local seqid
+
+ echo "Packets incorrectly dropped:"
+
+ dropped_seqids=$(isochron report \
+ --input-file "${isochron_dat}" \
+ --printf-format "%u RX hw %T\n" \
+ --printf-args "qR" | \
+ grep 'RX hw 0.000000000' | \
+ awk '{print $1}')
+
+ for seqid in ${dropped_seqids}; do
+ isochron report \
+ --input-file "${isochron_dat}" \
+ --start ${seqid} --stop ${seqid} \
+ --printf-format "seqid %u scheduled for %T, HW TX timestamp %T\n" \
+ --printf-args "qST"
+ done
+}
+
+debug_incorrectly_received_packets()
+{
+ local isochron_dat=$1
+
+ echo "Packets incorrectly received:"
+
+ isochron report \
+ --input-file "${isochron_dat}" \
+ --printf-format "seqid %u scheduled for %T, HW TX timestamp %T, HW RX timestamp %T\n" \
+ --printf-args "qSTR" |
+ grep -v 'HW RX timestamp 0.000000000'
+}
+
+run_test()
+{
+ local base_time=$1
+ local expected=$2
+ local test_name=$3
+ local debug=$4
+ local isochron_dat="$(mktemp)"
+ local extra_args=""
+ local received
+
+ isochron_do \
+ "${h1}" \
+ "${h2}" \
+ "${UDS_ADDRESS_H1}" \
+ "" \
+ "${base_time}" \
+ "${CYCLE_TIME_NS}" \
+ "${SHIFT_TIME_NS}" \
+ "${NUM_PKTS}" \
+ "${STREAM_VID}" \
+ "${STREAM_PRIO}" \
+ "" \
+ "${isochron_dat}"
+
+ # Count all received packets by looking at the non-zero RX timestamps
+ received=$(isochron report \
+ --input-file "${isochron_dat}" \
+ --printf-format "%u\n" --printf-args "R" | \
+ grep -w -v '0' | wc -l)
+
+ if [ "${received}" = "${expected}" ]; then
+ RET=0
+ else
+ RET=1
+ echo "Expected isochron to receive ${expected} packets but received ${received}"
+ fi
+
+ log_test "${test_name}"
+
+ if [ "$RET" = "1" ]; then
+ ${debug} "${isochron_dat}"
+ fi
+
+ rm ${isochron_dat} 2> /dev/null
+}
+
+test_gate_in_band()
+{
+ # Send packets in-band with the OPEN gate entry
+ run_test 0.000000000 ${NUM_PKTS} "In band" \
+ debug_incorrectly_dropped_packets
+
+ psfp_filter_check ${NUM_PKTS}
+}
+
+test_gate_out_of_band()
+{
+ # Send packets in-band with the CLOSE gate entry
+ run_test 0.005000000 0 "Out of band" \
+ debug_incorrectly_received_packets
+
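+	# The gate filter counter is cumulative: the in-band test matched
+	# NUM_PKTS packets, and all of this test's packets match (and are
+	# dropped) as well.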
+ psfp_filter_check $((2 * ${NUM_PKTS}))
+}
+
+trap cleanup EXIT
+
+ALL_TESTS="
+ test_gate_in_band
+ test_gate_out_of_band
+"
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
index 10e54bcca7a9..9c79bbcce5a8 100755
--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -4,35 +4,17 @@
WAIT_TIME=1
NUM_NETIFS=4
+STABLE_MAC_ADDRS=yes
lib_dir=$(dirname $0)/../../../net/forwarding
source $lib_dir/tc_common.sh
source $lib_dir/lib.sh
require_command tcpdump
-#
-# +---------------------------------------------+
-# | DUT ports Generator ports |
-# | +--------+ +--------+ +--------+ +--------+ |
-# | | | | | | | | | |
-# | | eth0 | | eth1 | | eth2 | | eth3 | |
-# | | | | | | | | | |
-# +-+--------+-+--------+-+--------+-+--------+-+
-# | | | |
-# | | | |
-# | +-----------+ |
-# | |
-# +--------------------------------+
-
-eth0=${NETIFS[p1]}
-eth1=${NETIFS[p2]}
-eth2=${NETIFS[p3]}
-eth3=${NETIFS[p4]}
-
-eth0_mac="de:ad:be:ef:00:00"
-eth1_mac="de:ad:be:ef:00:01"
-eth2_mac="de:ad:be:ef:00:02"
-eth3_mac="de:ad:be:ef:00:03"
+h1=${NETIFS[p1]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p3]}
+h2=${NETIFS[p4]}
# Helpers to map a VCAP IS1 and VCAP IS2 lookup and policy to a chain number
# used by the kernel driver. The numbers are:
@@ -156,39 +138,39 @@ create_tcam_skeleton()
setup_prepare()
{
- ip link set $eth0 up
- ip link set $eth1 up
- ip link set $eth2 up
- ip link set $eth3 up
+ ip link set $swp1 up
+ ip link set $swp2 up
+ ip link set $h2 up
+ ip link set $h1 up
- create_tcam_skeleton $eth0
+ create_tcam_skeleton $swp1
ip link add br0 type bridge
- ip link set $eth0 master br0
- ip link set $eth1 master br0
+ ip link set $swp1 master br0
+ ip link set $swp2 master br0
ip link set br0 up
- ip link add link $eth3 name $eth3.100 type vlan id 100
- ip link set $eth3.100 up
+ ip link add link $h1 name $h1.100 type vlan id 100
+ ip link set $h1.100 up
- ip link add link $eth3 name $eth3.200 type vlan id 200
- ip link set $eth3.200 up
+ ip link add link $h1 name $h1.200 type vlan id 200
+ ip link set $h1.200 up
- tc filter add dev $eth0 ingress chain $(IS1 1) pref 1 \
+ tc filter add dev $swp1 ingress chain $(IS1 1) pref 1 \
protocol 802.1Q flower skip_sw vlan_id 100 \
action vlan pop \
action goto chain $(IS1 2)
- tc filter add dev $eth0 egress chain $(ES0) pref 1 \
- flower skip_sw indev $eth1 \
+ tc filter add dev $swp1 egress chain $(ES0) pref 1 \
+ flower skip_sw indev $swp2 \
action vlan push protocol 802.1Q id 100
- tc filter add dev $eth0 ingress chain $(IS1 0) pref 2 \
+ tc filter add dev $swp1 ingress chain $(IS1 0) pref 2 \
protocol ipv4 flower skip_sw src_ip 10.1.1.2 \
action skbedit priority 7 \
action goto chain $(IS1 1)
- tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \
+ tc filter add dev $swp1 ingress chain $(IS2 0 0) pref 1 \
protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \
action police rate 50mbit burst 64k conform-exceed drop/pipe \
action goto chain $(IS2 1 0)
@@ -196,150 +178,160 @@ setup_prepare()
cleanup()
{
- ip link del $eth3.200
- ip link del $eth3.100
- tc qdisc del dev $eth0 clsact
+ ip link del $h1.200
+ ip link del $h1.100
+ tc qdisc del dev $swp1 clsact
ip link del br0
}
test_vlan_pop()
{
- printf "Testing VLAN pop.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ RET=0
- tcpdump_start $eth2
+ tcpdump_start $h2
# Work around Mausezahn VLAN builder bug
# (https://github.com/netsniff-ng/netsniff-ng/issues/225) by using
# an 8021q upper
- $MZ $eth3.100 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+ $MZ $h1.100 -q -c 1 -p 64 -a $h1_mac -b $h2_mac -t ip
sleep 1
- tcpdump_stop
+ tcpdump_stop $h2
- if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, ethertype IPv4"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h2 | grep -q "$h1_mac > $h2_mac, ethertype IPv4"
+ check_err "$?" "untagged reception"
+
+ tcpdump_cleanup $h2
- tcpdump_cleanup
+ log_test "VLAN pop"
}
test_vlan_push()
{
- printf "Testing VLAN push.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
- tcpdump_start $eth3.100
+ RET=0
- $MZ $eth2 -q -c 1 -p 64 -a $eth2_mac -b $eth3_mac -t ip
+ tcpdump_start $h1.100
+
+ $MZ $h2 -q -c 1 -p 64 -a $h2_mac -b $h1_mac -t ip
sleep 1
- tcpdump_stop
+ tcpdump_stop $h1.100
- if tcpdump_show | grep -q "$eth2_mac > $eth3_mac"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h1.100 | grep -q "$h2_mac > $h1_mac"
+ check_err "$?" "tagged reception"
- tcpdump_cleanup
+ tcpdump_cleanup $h1.100
+
+ log_test "VLAN push"
}
test_vlan_ingress_modify()
{
- printf "Testing ingress VLAN modification.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
+
+ RET=0
ip link set br0 type bridge vlan_filtering 1
- bridge vlan add dev $eth0 vid 200
- bridge vlan add dev $eth0 vid 300
- bridge vlan add dev $eth1 vid 300
+ bridge vlan add dev $swp1 vid 200
+ bridge vlan add dev $swp1 vid 300
+ bridge vlan add dev $swp2 vid 300
- tc filter add dev $eth0 ingress chain $(IS1 2) pref 3 \
+ tc filter add dev $swp1 ingress chain $(IS1 2) pref 3 \
protocol 802.1Q flower skip_sw vlan_id 200 \
action vlan modify id 300 \
action goto chain $(IS2 0 0)
- tcpdump_start $eth2
+ tcpdump_start $h2
- $MZ $eth3.200 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+ $MZ $h1.200 -q -c 1 -p 64 -a $h1_mac -b $h2_mac -t ip
sleep 1
- tcpdump_stop
+ tcpdump_stop $h2
- if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h2 | grep -q "$h1_mac > $h2_mac, .* vlan 300"
+ check_err "$?" "tagged reception"
- tcpdump_cleanup
+ tcpdump_cleanup $h2
- tc filter del dev $eth0 ingress chain $(IS1 2) pref 3
+ tc filter del dev $swp1 ingress chain $(IS1 2) pref 3
- bridge vlan del dev $eth0 vid 200
- bridge vlan del dev $eth0 vid 300
- bridge vlan del dev $eth1 vid 300
+ bridge vlan del dev $swp1 vid 200
+ bridge vlan del dev $swp1 vid 300
+ bridge vlan del dev $swp2 vid 300
ip link set br0 type bridge vlan_filtering 0
+
+ log_test "Ingress VLAN modification"
}
test_vlan_egress_modify()
{
- printf "Testing egress VLAN modification.. "
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
- tc qdisc add dev $eth1 clsact
+ RET=0
+
+ tc qdisc add dev $swp2 clsact
ip link set br0 type bridge vlan_filtering 1
- bridge vlan add dev $eth0 vid 200
- bridge vlan add dev $eth1 vid 200
+ bridge vlan add dev $swp1 vid 200
+ bridge vlan add dev $swp2 vid 200
- tc filter add dev $eth1 egress chain $(ES0) pref 3 \
+ tc filter add dev $swp2 egress chain $(ES0) pref 3 \
protocol 802.1Q flower skip_sw vlan_id 200 vlan_prio 0 \
action vlan modify id 300 priority 7
- tcpdump_start $eth2
+ tcpdump_start $h2
- $MZ $eth3.200 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+ $MZ $h1.200 -q -c 1 -p 64 -a $h1_mac -b $h2_mac -t ip
sleep 1
- tcpdump_stop
+ tcpdump_stop $h2
- if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
- echo "OK"
- else
- echo "FAIL"
- fi
+ tcpdump_show $h2 | grep -q "$h1_mac > $h2_mac, .* vlan 300"
+ check_err "$?" "tagged reception"
- tcpdump_cleanup
+ tcpdump_cleanup $h2
- tc filter del dev $eth1 egress chain $(ES0) pref 3
- tc qdisc del dev $eth1 clsact
+ tc filter del dev $swp2 egress chain $(ES0) pref 3
+ tc qdisc del dev $swp2 clsact
- bridge vlan del dev $eth0 vid 200
- bridge vlan del dev $eth1 vid 200
+ bridge vlan del dev $swp1 vid 200
+ bridge vlan del dev $swp2 vid 200
ip link set br0 type bridge vlan_filtering 0
+
+ log_test "Egress VLAN modification"
}
test_skbedit_priority()
{
+ local h1_mac=$(mac_get $h1)
+ local h2_mac=$(mac_get $h2)
local num_pkts=100
- printf "Testing frame prioritization.. "
+ before=$(ethtool_stats_get $swp1 'rx_green_prio_7')
- before=$(ethtool_stats_get $eth0 'rx_green_prio_7')
+ $MZ $h1 -q -c $num_pkts -p 64 -a $h1_mac -b $h2_mac -t ip -A 10.1.1.2
- $MZ $eth3 -q -c $num_pkts -p 64 -a $eth3_mac -b $eth2_mac -t ip -A 10.1.1.2
-
- after=$(ethtool_stats_get $eth0 'rx_green_prio_7')
+ after=$(ethtool_stats_get $swp1 'rx_green_prio_7')
if [ $((after - before)) = $num_pkts ]; then
- echo "OK"
+ RET=0
else
- echo "FAIL"
+ RET=1
fi
+
+ log_test "Frame prioritization"
}
trap cleanup EXIT
diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/Makefile b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
new file mode 100644
index 000000000000..891215a7dc8a
--- /dev/null
+++ b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
@@ -0,0 +1,21 @@
+include ../../../../../build/Build.include
+
+UNAME_M := $(shell uname -m)
+
+ifneq ($(UNAME_M),s390x)
+nothing:
+.PHONY: all clean run_tests install
+.SILENT:
+else
+
+TEST_GEN_PROGS := test_uvdevice
+
+top_srcdir ?= ../../../../../..
+khdr_dir = $(top_srcdir)/usr/include
+LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
+
+CFLAGS += -Wall -Werror -static -I$(khdr_dir) -I$(LINUX_TOOL_ARCH_INCLUDE)
+
+include ../../../lib.mk
+
+endif
diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/config b/tools/testing/selftests/drivers/s390x/uvdevice/config
new file mode 100644
index 000000000000..f28a04b99eff
--- /dev/null
+++ b/tools/testing/selftests/drivers/s390x/uvdevice/config
@@ -0,0 +1 @@
+CONFIG_S390_UV_UAPI=y
diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c b/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c
new file mode 100644
index 000000000000..ea0cdc37b44f
--- /dev/null
+++ b/tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * selftest for the Ultravisor UAPI device
+ *
+ * Copyright IBM Corp. 2022
+ * Author(s): Steffen Eiden <seiden@linux.ibm.com>
+ */
+
+#include <stdint.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <asm/uvdevice.h>
+
+#include "../../../kselftest_harness.h"
+
+#define UV_PATH "/dev/uv"
+#define BUFFER_SIZE 0x200
+FIXTURE(uvio_fixture) {
+ int uv_fd;
+ struct uvio_ioctl_cb uvio_ioctl;
+ uint8_t buffer[BUFFER_SIZE];
+ __u64 fault_page;
+};
+
+FIXTURE_VARIANT(uvio_fixture) {
+ unsigned long ioctl_cmd;
+ uint32_t arg_size;
+};
+
+FIXTURE_VARIANT_ADD(uvio_fixture, att) {
+ .ioctl_cmd = UVIO_IOCTL_ATT,
+ .arg_size = sizeof(struct uvio_attest),
+};
+
+FIXTURE_SETUP(uvio_fixture)
+{
+ self->uv_fd = open(UV_PATH, O_ACCMODE);
+
+ self->uvio_ioctl.argument_addr = (__u64)self->buffer;
+ self->uvio_ioctl.argument_len = variant->arg_size;
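+	/* reserve an address range that faults on any kernel access */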
+ self->fault_page =
+ (__u64)mmap(NULL, (size_t)getpagesize(), PROT_NONE, MAP_ANONYMOUS, -1, 0);
+}
+
+FIXTURE_TEARDOWN(uvio_fixture)
+{
+ if (self->uv_fd)
+ close(self->uv_fd);
+ munmap((void *)self->fault_page, (size_t)getpagesize());
+}
+
+TEST_F(uvio_fixture, fault_ioctl_arg)
+{
+ int rc, errno_cache;
+
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, NULL);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EFAULT);
+
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, self->fault_page);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EFAULT);
+}
+
+TEST_F(uvio_fixture, fault_uvio_arg)
+{
+ int rc, errno_cache;
+
+ self->uvio_ioctl.argument_addr = 0;
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EFAULT);
+
+ self->uvio_ioctl.argument_addr = self->fault_page;
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EFAULT);
+}
+
+/*
+ * Test to verify that IOCTLs with invalid values in the ioctl_control block
+ * are rejected.
+ */
+TEST_F(uvio_fixture, inval_ioctl_cb)
+{
+ int rc, errno_cache;
+
+ self->uvio_ioctl.argument_len = 0;
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EINVAL);
+
+ self->uvio_ioctl.argument_len = (uint32_t)-1;
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EINVAL);
+ self->uvio_ioctl.argument_len = variant->arg_size;
+
+ self->uvio_ioctl.flags = (uint32_t)-1;
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EINVAL);
+ self->uvio_ioctl.flags = 0;
+
+ memset(self->uvio_ioctl.reserved14, 0xff, sizeof(self->uvio_ioctl.reserved14));
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EINVAL);
+
+ memset(&self->uvio_ioctl, 0x11, sizeof(self->uvio_ioctl));
+ rc = ioctl(self->uv_fd, variant->ioctl_cmd, &self->uvio_ioctl);
+ ASSERT_EQ(rc, -1);
+}
+
+TEST_F(uvio_fixture, inval_ioctl_cmd)
+{
+ int rc, errno_cache;
+ uint8_t nr = _IOC_NR(variant->ioctl_cmd);
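+	/* wrong ioctl magic, wrong argument size, and wrong direction bits */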
+ unsigned long cmds[] = {
+ _IOWR('a', nr, struct uvio_ioctl_cb),
+ _IOWR(UVIO_TYPE_UVC, nr, int),
+ _IO(UVIO_TYPE_UVC, nr),
+ _IOR(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb),
+ _IOW(UVIO_TYPE_UVC, nr, struct uvio_ioctl_cb),
+ };
+
+ for (size_t i = 0; i < ARRAY_SIZE(cmds); i++) {
+ rc = ioctl(self->uv_fd, cmds[i], &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, ENOTTY);
+ }
+}
+
+struct test_attest_buffer {
+ uint8_t arcb[0x180];
+ uint8_t meas[64];
+ uint8_t add[32];
+};
+
+FIXTURE(attest_fixture) {
+ int uv_fd;
+ struct uvio_ioctl_cb uvio_ioctl;
+ struct uvio_attest uvio_attest;
+ struct test_attest_buffer attest_buffer;
+ __u64 fault_page;
+};
+
+FIXTURE_SETUP(attest_fixture)
+{
+ self->uv_fd = open(UV_PATH, O_ACCMODE);
+
+ self->uvio_ioctl.argument_addr = (__u64)&self->uvio_attest;
+ self->uvio_ioctl.argument_len = sizeof(self->uvio_attest);
+
+ self->uvio_attest.arcb_addr = (__u64)&self->attest_buffer.arcb;
+ self->uvio_attest.arcb_len = sizeof(self->attest_buffer.arcb);
+
+ self->uvio_attest.meas_addr = (__u64)&self->attest_buffer.meas;
+ self->uvio_attest.meas_len = sizeof(self->attest_buffer.meas);
+
+ self->uvio_attest.add_data_addr = (__u64)&self->attest_buffer.add;
+ self->uvio_attest.add_data_len = sizeof(self->attest_buffer.add);
+ self->fault_page =
+ (__u64)mmap(NULL, (size_t)getpagesize(), PROT_NONE, MAP_ANONYMOUS, -1, 0);
+}
+
+FIXTURE_TEARDOWN(attest_fixture)
+{
+ if (self->uv_fd)
+ close(self->uv_fd);
+ munmap((void *)self->fault_page, (size_t)getpagesize());
+}
+
+static void att_inval_sizes_test(uint32_t *size, uint32_t max_size, bool test_zero,
+ struct __test_metadata *_metadata,
+ FIXTURE_DATA(attest_fixture) *self)
+{
+ int rc, errno_cache;
+ uint32_t tmp = *size;
+
+ if (test_zero) {
+ *size = 0;
+ rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EINVAL);
+ }
+ *size = max_size + 1;
+ rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EINVAL);
+ *size = tmp;
+}
+
+/*
+ * Test to verify that attestation IOCTLs with invalid values in the UVIO
+ * attestation control block are rejected.
+ */
+TEST_F(attest_fixture, att_inval_request)
+{
+ int rc, errno_cache;
+
+ att_inval_sizes_test(&self->uvio_attest.add_data_len, UVIO_ATT_ADDITIONAL_MAX_LEN,
+ false, _metadata, self);
+ att_inval_sizes_test(&self->uvio_attest.meas_len, UVIO_ATT_MEASUREMENT_MAX_LEN,
+ true, _metadata, self);
+ att_inval_sizes_test(&self->uvio_attest.arcb_len, UVIO_ATT_ARCB_MAX_LEN,
+ true, _metadata, self);
+
+ self->uvio_attest.reserved136 = (uint16_t)-1;
+ rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EINVAL);
+
+ memset(&self->uvio_attest, 0x11, sizeof(self->uvio_attest));
+ rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
+ ASSERT_EQ(rc, -1);
+}
+
+static void att_inval_addr_test(__u64 *addr, struct __test_metadata *_metadata,
+ FIXTURE_DATA(attest_fixture) *self)
+{
+ int rc, errno_cache;
+ __u64 tmp = *addr;
+
+ *addr = 0;
+ rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EFAULT);
+ *addr = self->fault_page;
+ rc = ioctl(self->uv_fd, UVIO_IOCTL_ATT, &self->uvio_ioctl);
+ errno_cache = errno;
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_cache, EFAULT);
+ *addr = tmp;
+}
+
+TEST_F(attest_fixture, att_inval_addr)
+{
+ att_inval_addr_test(&self->uvio_attest.arcb_addr, _metadata, self);
+ att_inval_addr_test(&self->uvio_attest.add_data_addr, _metadata, self);
+ att_inval_addr_test(&self->uvio_attest.meas_addr, _metadata, self);
+}
+
+static void __attribute__((constructor)) __constructor_order_last(void)
+{
+ if (!__constructor_order)
+ __constructor_order = _CONSTRUCTOR_ORDER_BACKWARD;
+}
+
+int main(int argc, char **argv)
+{
+ int fd = open(UV_PATH, O_ACCMODE);
+
+ if (fd < 0)
+ ksft_exit_skip("No uv-device or cannot access " UV_PATH "\n"
+ "Enable CONFIG_S390_UV_UAPI and check the access rights on "
+ UV_PATH ".\n");
+ close(fd);
+ return test_harness_run(argc, argv);
+}
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index 0315955ff0f4..5f362c0fd890 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -64,6 +64,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
device_path[sizeof(P_tmpdir "/binderfs_XXXXXX/") + BINDERFS_MAX_NAME];
static const char * const binder_features[] = {
"oneway_spam_detection",
+ "extended_error",
};
change_mountns(_metadata);
@@ -412,7 +413,8 @@ TEST(binderfs_stress)
ret = mount(NULL, binderfs_mntpt, "binder", 0, 0);
ASSERT_EQ(ret, 0) {
- TH_LOG("%s - Failed to mount binderfs", strerror(errno));
+ TH_LOG("%s - Failed to mount binderfs, check if CONFIG_ANDROID_BINDERFS is enabled in the running kernel",
+ strerror(errno));
}
for (int i = 0; i < ARRAY_SIZE(fds); i++) {
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config
index 02dd6cc9cf99..7b4fc6ee6205 100644
--- a/tools/testing/selftests/filesystems/binderfs/config
+++ b/tools/testing/selftests/filesystems/binderfs/config
@@ -1,3 +1,2 @@
-CONFIG_ANDROID=y
CONFIG_ANDROID_BINDERFS=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/tools/testing/selftests/firmware/Makefile b/tools/testing/selftests/firmware/Makefile
index 40211cd8f0e6..7992969deaa2 100644
--- a/tools/testing/selftests/firmware/Makefile
+++ b/tools/testing/selftests/firmware/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall \
-O2
TEST_PROGS := fw_run_tests.sh
-TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh
+TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_upload.sh fw_lib.sh
TEST_GEN_FILES := fw_namespace
include ../lib.mk
diff --git a/tools/testing/selftests/firmware/config b/tools/testing/selftests/firmware/config
index bf634dda0720..6e402519b117 100644
--- a/tools/testing/selftests/firmware/config
+++ b/tools/testing/selftests/firmware/config
@@ -3,3 +3,4 @@ CONFIG_FW_LOADER=y
CONFIG_FW_LOADER_USER_HELPER=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_FW_UPLOAD=y
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index c2a2a100114b..1a99aea0549e 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -11,6 +11,9 @@ TEST_REQS_FW_SET_CUSTOM_PATH="yes"
TEST_DIR=$(dirname $0)
source $TEST_DIR/fw_lib.sh
+RUN_XZ="xz -C crc32 --lzma2=dict=2MiB"
+RUN_ZSTD="zstd -q"
+
check_mods
check_setup
verify_reqs
@@ -211,7 +214,7 @@ read_firmwares()
else
fwfile="$FW"
fi
- if [ "$1" = "xzonly" ]; then
+ if [ "$1" = "componly" ]; then
fwfile="${fwfile}-orig"
fi
for i in $(seq 0 3); do
@@ -235,7 +238,7 @@ read_partial_firmwares()
fwfile="${FW}"
fi
- if [ "$1" = "xzonly" ]; then
+ if [ "$1" = "componly" ]; then
fwfile="${fwfile}-orig"
fi
@@ -409,10 +412,8 @@ test_request_firmware_nowait_custom()
config_unset_uevent
RANDOM_FILE_PATH=$(setup_random_file)
RANDOM_FILE="$(basename $RANDOM_FILE_PATH)"
- if [ "$2" = "both" ]; then
- xz -9 -C crc32 -k $RANDOM_FILE_PATH
- elif [ "$2" = "xzonly" ]; then
- xz -9 -C crc32 $RANDOM_FILE_PATH
+ if [ -n "$2" -a "$2" != "normal" ]; then
+ compress_"$2"_"$COMPRESS_FORMAT" $RANDOM_FILE_PATH
fi
config_set_name $RANDOM_FILE
config_trigger_async
@@ -435,6 +436,32 @@ test_request_partial_firmware_into_buf()
echo "OK"
}
+do_tests ()
+{
+ mode="$1"
+ suffix="$2"
+
+ for i in $(seq 1 5); do
+ test_batched_request_firmware$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_batched_request_firmware_into_buf$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_batched_request_firmware_direct$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_request_firmware_nowait_uevent$suffix $i $mode
+ done
+
+ for i in $(seq 1 5); do
+ test_request_firmware_nowait_custom$suffix $i $mode
+ done
+}
+
# Only continue if batched request triggers are present on the
# test-firmware driver
test_config_present
@@ -442,25 +469,7 @@ test_config_present
# test with the file present
echo
echo "Testing with the file present..."
-for i in $(seq 1 5); do
- test_batched_request_firmware $i normal
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf $i normal
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct $i normal
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent $i normal
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom $i normal
-done
+do_tests normal
# Partial loads cannot use fallback, so do not repeat tests.
test_request_partial_firmware_into_buf 0 10
@@ -472,25 +481,7 @@ test_request_partial_firmware_into_buf 2 10
# a hung task, which would require a hard reset.
echo
echo "Testing with the file missing..."
-for i in $(seq 1 5); do
- test_batched_request_firmware_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent_nofile $i
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom_nofile $i
-done
+do_tests nofile _nofile
# Partial loads cannot use fallback, so do not repeat tests.
test_request_partial_firmware_into_buf_nofile 0 10
@@ -498,55 +489,58 @@ test_request_partial_firmware_into_buf_nofile 0 5
test_request_partial_firmware_into_buf_nofile 1 6
test_request_partial_firmware_into_buf_nofile 2 10
-test "$HAS_FW_LOADER_COMPRESS" != "yes" && exit 0
+test_request_firmware_compressed ()
+{
+ export COMPRESS_FORMAT="$1"
-# test with both files present
-xz -9 -C crc32 -k $FW
-config_set_name $NAME
-echo
-echo "Testing with both plain and xz files present..."
-for i in $(seq 1 5); do
- test_batched_request_firmware $i both
-done
+ # test with both files present
+ compress_both_"$COMPRESS_FORMAT" $FW
+ compress_both_"$COMPRESS_FORMAT" $FW_INTO_BUF
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf $i both
-done
+ config_set_name $NAME
+ echo
+ echo "Testing with both plain and $COMPRESS_FORMAT files present..."
+ do_tests both
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct $i both
-done
+ # test with only compressed file present
+ mv "$FW" "${FW}-orig"
+ mv "$FW_INTO_BUF" "${FW_INTO_BUF}-orig"
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent $i both
-done
+ config_set_name $NAME
+ echo
+ echo "Testing with only $COMPRESS_FORMAT file present..."
+ do_tests componly
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom $i both
-done
+ mv "${FW}-orig" "$FW"
+ mv "${FW_INTO_BUF}-orig" "$FW_INTO_BUF"
+}
-# test with only xz file present
-mv "$FW" "${FW}-orig"
-echo
-echo "Testing with only xz file present..."
-for i in $(seq 1 5); do
- test_batched_request_firmware $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_into_buf $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_batched_request_firmware_direct $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_uevent $i xzonly
-done
-
-for i in $(seq 1 5); do
- test_request_firmware_nowait_custom $i xzonly
-done
+compress_both_XZ ()
+{
+ $RUN_XZ -k "$@"
+}
+
+compress_componly_XZ ()
+{
+ $RUN_XZ "$@"
+}
+
+compress_both_ZSTD ()
+{
+ $RUN_ZSTD -k "$@"
+}
+
+compress_componly_ZSTD ()
+{
+ $RUN_ZSTD --rm "$@"
+}
+
+if test "$HAS_FW_LOADER_COMPRESS_XZ" = "yes"; then
+ test_request_firmware_compressed XZ
+fi
+
+if test "$HAS_FW_LOADER_COMPRESS_ZSTD" = "yes"; then
+ test_request_firmware_compressed ZSTD
+fi
exit 0
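
For context, the kernel consumer that these compression tests exercise is format-agnostic: request_firmware() transparently falls back to a .xz or .zst image when the plain file is absent, which is exactly what the componly runs verify. A rough sketch of such a consumer (driver and firmware names are placeholders):

#include <linux/device.h>
#include <linux/firmware.h>

static int demo_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* Finds demo-fw.bin, demo-fw.bin.xz, or demo-fw.bin.zst. */
	ret = request_firmware(&fw, "demo-fw.bin", dev);
	if (ret)
		return ret;

	/* fw->data and fw->size always hold the decompressed image. */
	release_firmware(fw);
	return 0;
}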
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 5b8c0fedee76..7bffd67800bf 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -62,7 +62,9 @@ check_setup()
{
HAS_FW_LOADER_USER_HELPER="$(kconfig_has CONFIG_FW_LOADER_USER_HELPER=y)"
HAS_FW_LOADER_USER_HELPER_FALLBACK="$(kconfig_has CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y)"
- HAS_FW_LOADER_COMPRESS="$(kconfig_has CONFIG_FW_LOADER_COMPRESS=y)"
+ HAS_FW_LOADER_COMPRESS_XZ="$(kconfig_has CONFIG_FW_LOADER_COMPRESS_XZ=y)"
+ HAS_FW_LOADER_COMPRESS_ZSTD="$(kconfig_has CONFIG_FW_LOADER_COMPRESS_ZSTD=y)"
+ HAS_FW_UPLOAD="$(kconfig_has CONFIG_FW_UPLOAD=y)"
PROC_FW_IGNORE_SYSFS_FALLBACK="0"
PROC_FW_FORCE_SYSFS_FALLBACK="0"
@@ -98,9 +100,14 @@ check_setup()
OLD_FWPATH="$(cat /sys/module/firmware_class/parameters/path)"
- if [ "$HAS_FW_LOADER_COMPRESS" = "yes" ]; then
+ if [ "$HAS_FW_LOADER_COMPRESS_XZ" = "yes" ]; then
if ! which xz 2> /dev/null > /dev/null; then
- HAS_FW_LOADER_COMPRESS=""
+ HAS_FW_LOADER_COMPRESS_XZ=""
+ fi
+ fi
+ if [ "$HAS_FW_LOADER_COMPRESS_ZSTD" = "yes" ]; then
+ if ! which zstd 2> /dev/null > /dev/null; then
+ HAS_FW_LOADER_COMPRESS_ZSTD=""
fi
fi
}
@@ -113,6 +120,12 @@ verify_reqs()
exit 0
fi
fi
+ if [ "$TEST_REQS_FW_UPLOAD" = "yes" ]; then
+ if [ ! "$HAS_FW_UPLOAD" = "yes" ]; then
+ echo "firmware upload disabled so ignoring test"
+ exit 0
+ fi
+ fi
}
setup_tmp_file()
diff --git a/tools/testing/selftests/firmware/fw_run_tests.sh b/tools/testing/selftests/firmware/fw_run_tests.sh
index 777377078d5e..f6d95a2d5124 100755
--- a/tools/testing/selftests/firmware/fw_run_tests.sh
+++ b/tools/testing/selftests/firmware/fw_run_tests.sh
@@ -22,6 +22,10 @@ run_tests()
proc_set_force_sysfs_fallback $1
proc_set_ignore_sysfs_fallback $2
$TEST_DIR/fw_fallback.sh
+
+ proc_set_force_sysfs_fallback $1
+ proc_set_ignore_sysfs_fallback $2
+ $TEST_DIR/fw_upload.sh
}
run_test_config_0001()
diff --git a/tools/testing/selftests/firmware/fw_upload.sh b/tools/testing/selftests/firmware/fw_upload.sh
new file mode 100755
index 000000000000..c7a6f06c9adb
--- /dev/null
+++ b/tools/testing/selftests/firmware/fw_upload.sh
@@ -0,0 +1,214 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# This validates the user-initiated fw upload mechanism of the firmware
+# loader. It verifies that one or more firmware devices can be created
+# for a device driver. It also verifies the data transfer, the
+# cancellation support, and the error flows.
+set -e
+
+TEST_REQS_FW_UPLOAD="yes"
+TEST_DIR=$(dirname $0)
+
+progress_states="preparing transferring programming"
+errors="hw-error
+ timeout
+ device-busy
+ invalid-file-size
+ read-write-error
+ flash-wearout"
+error_abort="user-abort"
+fwname1=fw1
+fwname2=fw2
+fwname3=fw3
+
+source $TEST_DIR/fw_lib.sh
+
+check_mods
+check_setup
+verify_reqs
+
+trap "upload_finish" EXIT
+
+upload_finish() {
+ local fwdevs="$fwname1 $fwname2 $fwname3"
+
+ for name in $fwdevs; do
+ if [ -e "$DIR/$name" ]; then
+ echo -n "$name" > "$DIR"/upload_unregister
+ fi
+ done
+}
+
+upload_fw() {
+ local name="$1"
+ local file="$2"
+
+ echo 1 > "$DIR"/"$name"/loading
+ cat "$file" > "$DIR"/"$name"/data
+ echo 0 > "$DIR"/"$name"/loading
+}
+
+verify_fw() {
+ local name="$1"
+ local file="$2"
+
+ echo -n "$name" > "$DIR"/config_upload_name
+ if ! cmp "$file" "$DIR"/upload_read > /dev/null 2>&1; then
+ echo "$0: firmware compare for $name did not match" >&2
+ exit 1
+ fi
+
+ echo "$0: firmware upload for $name works" >&2
+ return 0
+}
+
+inject_error() {
+ local name="$1"
+ local status="$2"
+ local error="$3"
+
+ echo 1 > "$DIR"/"$name"/loading
+ echo -n "inject":"$status":"$error" > "$DIR"/"$name"/data
+ echo 0 > "$DIR"/"$name"/loading
+}
+
+await_status() {
+ local name="$1"
+ local expected="$2"
+ local status
+ local i
+
+ let i=0
+ while [ $i -lt 50 ]; do
+ status=$(cat "$DIR"/"$name"/status)
+ if [ "$status" = "$expected" ]; then
+ return 0;
+ fi
+ sleep 1e-03
+ let i=$i+1
+ done
+
+ echo "$0: Invalid status: Expected $expected, Actual $status" >&2
+ return 1;
+}
+
+await_idle() {
+ local name="$1"
+
+ await_status "$name" "idle"
+ return $?
+}
+
+expect_error() {
+ local name="$1"
+ local expected="$2"
+ local error=$(cat "$DIR"/"$name"/error)
+
+ if [ "$error" != "$expected" ]; then
+ echo "Invalid error: Expected $expected, Actual $error" >&2
+ return 1
+ fi
+
+ return 0
+}
+
+random_firmware() {
+ local bs="$1"
+ local count="$2"
+ local file=$(mktemp -p /tmp uploadfwXXX.bin)
+
+ dd if=/dev/urandom of="$file" bs="$bs" count="$count" > /dev/null 2>&1
+ echo "$file"
+}
+
+test_upload_cancel() {
+ local name="$1"
+ local status
+
+ for status in $progress_states; do
+ inject_error $name $status $error_abort
+ if ! await_status $name $status; then
+ exit 1
+ fi
+
+ echo 1 > "$DIR"/"$name"/cancel
+
+ if ! await_idle $name; then
+ exit 1
+ fi
+
+ if ! expect_error $name "$status":"$error_abort"; then
+ exit 1
+ fi
+ done
+
+ echo "$0: firmware upload cancellation works"
+ return 0
+}
+
+test_error_handling() {
+ local name=$1
+ local status
+ local error
+
+ for status in $progress_states; do
+ for error in $errors; do
+ inject_error $name $status $error
+
+ if ! await_idle $name; then
+ exit 1
+ fi
+
+ if ! expect_error $name "$status":"$error"; then
+ exit 1
+ fi
+
+ done
+ done
+ echo "$0: firmware upload error handling works"
+}
+
+test_fw_too_big() {
+ local name=$1
+ local fw_too_big=`random_firmware 512 5`
+ local expected="preparing:invalid-file-size"
+
+ upload_fw $name $fw_too_big
+ rm -f $fw_too_big
+
+ if ! await_idle $name; then
+ exit 1
+ fi
+
+ if ! expect_error $name $expected; then
+ exit 1
+ fi
+
+ echo "$0: oversized firmware error handling works"
+}
+
+echo -n "$fwname1" > "$DIR"/upload_register
+echo -n "$fwname2" > "$DIR"/upload_register
+echo -n "$fwname3" > "$DIR"/upload_register
+
+test_upload_cancel $fwname1
+test_error_handling $fwname1
+test_fw_too_big $fwname1
+
+fw_file1=`random_firmware 512 4`
+fw_file2=`random_firmware 512 3`
+fw_file3=`random_firmware 512 2`
+
+upload_fw $fwname1 $fw_file1
+upload_fw $fwname2 $fw_file2
+upload_fw $fwname3 $fw_file3
+
+verify_fw ${fwname1} ${fw_file1}
+verify_fw ${fwname2} ${fw_file2}
+verify_fw ${fwname3} ${fw_file3}
+
+echo -n "$fwname1" > "$DIR"/upload_unregister
+echo -n "$fwname2" > "$DIR"/upload_unregister
+echo -n "$fwname3" > "$DIR"/upload_unregister
+
+exit 0
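
The loading/data/loading sequence in upload_fw() follows the sysfs ABI of CONFIG_FW_UPLOAD devices, which appear under /sys/class/firmware/<name>/. A rough C equivalent of that shell helper, assuming the same ABI and with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int upload_fw(const char *name, const char *file)
{
	char path[256], buf[4096];
	int loading, data, src;
	ssize_t n;

	snprintf(path, sizeof(path), "/sys/class/firmware/%s/loading", name);
	loading = open(path, O_WRONLY);
	snprintf(path, sizeof(path), "/sys/class/firmware/%s/data", name);
	data = open(path, O_WRONLY);
	src = open(file, O_RDONLY);
	if (loading < 0 || data < 0 || src < 0)
		return -1;

	write(loading, "1", 1);				/* start the transfer */
	while ((n = read(src, buf, sizeof(buf))) > 0)
		write(data, buf, n);
	write(loading, "0", 1);				/* commit the image */

	close(src);
	close(data);
	close(loading);
	return 0;
}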
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
index dc7ade196798..459741565222 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -25,6 +25,9 @@ ppc*)
s390*)
ARG1=%r2
;;
+mips*)
+ ARG1=%r4
+;;
*)
echo "Please implement other architecture here"
exit_untested
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
index 47d84b5cb6ca..d4662c8cf407 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -36,6 +36,10 @@ s390*)
GOODREG=%r2
BADREG=%s2
;;
+mips*)
+ GOODREG=%r4
+ BADREG=%r12
+;;
*)
echo "Please implement other architecture here"
exit_untested
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
index 312d23780096..be754f5bcf79 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
@@ -25,6 +25,8 @@ if [ $L -ne 256 ]; then
exit_fail
fi
+cat kprobe_events >> $testlog
+
echo 1 > events/kprobes/enable
echo 0 > events/kprobes/enable
echo > kprobe_events
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index b8152c573e8a..732149011692 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -22,7 +22,6 @@ TEST_GEN_FILES := \
TEST_PROGS := run.sh
top_srcdir = ../../../../..
-KSFT_KHDR_INSTALL := 1
DEFAULT_INSTALL_HDR_PATH := 1
include ../../lib.mk
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 71b306602368..616ed4019655 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -3,6 +3,6 @@
TEST_PROGS := gpio-mockup.sh gpio-sim.sh
TEST_FILES := gpio-mockup-sysfs.sh
TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
-CFLAGS += -O2 -g -Wall -I../../../../usr/include/
+CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
include ../lib.mk
diff --git a/tools/testing/selftests/ir/ir_loopback.sh b/tools/testing/selftests/ir/ir_loopback.sh
index b90dc9939f45..aff9299c9416 100755
--- a/tools/testing/selftests/ir/ir_loopback.sh
+++ b/tools/testing/selftests/ir/ir_loopback.sh
@@ -10,7 +10,7 @@ if [ $UID != 0 ]; then
fi
if ! /sbin/modprobe -q -n rc-loopback; then
- echo "ir_loopback: module rc-loopback is not found [SKIP]"
+ echo "ir_loopback: module rc-loopback is not found in /lib/modules/`uname -r` [SKIP]"
exit $ksft_skip
fi
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
index 6ea7b9f37a41..25110c7c0b3e 100644
--- a/tools/testing/selftests/kcmp/kcmp_test.c
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -88,6 +88,9 @@ int main(int argc, char **argv)
int pid2 = getpid();
int ret;
+ ksft_print_header();
+ ksft_set_plan(3);
+
fd2 = open(kpath, O_RDWR, 0644);
if (fd2 < 0) {
perror("Can't open file");
@@ -152,7 +155,6 @@ int main(int argc, char **argv)
ksft_inc_pass_cnt();
}
- ksft_print_cnts();
if (ret)
ksft_exit_fail();
@@ -162,5 +164,5 @@ int main(int argc, char **argv)
waitpid(pid2, &status, P_ALL);
- return ksft_exit_pass();
+ return 0;
}
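
The kcmp change above moves the test onto the kselftest plan API: declare the number of expected results up front with ksft_set_plan() and report each result, rather than printing the counters by hand at the end. A minimal sketch of that flow (the two checks are placeholders):

#include "kselftest.h"

int main(void)
{
	ksft_print_header();
	ksft_set_plan(2);	/* must match the number of results reported */

	ksft_test_result_pass("first check\n");
	ksft_test_result_pass("second check\n");

	if (ksft_get_fail_cnt())
		ksft_exit_fail();
	return ksft_exit_pass();	/* prints the counters and exits */
}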
diff --git a/tools/testing/selftests/kexec/kexec_common_lib.sh b/tools/testing/selftests/kexec/kexec_common_lib.sh
index 0e114b34d5d7..641ef05863b2 100755
--- a/tools/testing/selftests/kexec/kexec_common_lib.sh
+++ b/tools/testing/selftests/kexec/kexec_common_lib.sh
@@ -65,32 +65,6 @@ get_efivarfs_secureboot_mode()
return 0;
}
-get_efi_var_secureboot_mode()
-{
- local efi_vars
- local secure_boot_file
- local setup_mode_file
- local secureboot_mode
- local setup_mode
-
- if [ ! -d "$efi_vars" ]; then
- log_skip "efi_vars is not enabled\n"
- fi
- secure_boot_file=$(find "$efi_vars" -name SecureBoot-* 2>/dev/null)
- setup_mode_file=$(find "$efi_vars" -name SetupMode-* 2>/dev/null)
- if [ -f "$secure_boot_file/data" ] && \
- [ -f "$setup_mode_file/data" ]; then
- secureboot_mode=`od -An -t u1 "$secure_boot_file/data"`
- setup_mode=`od -An -t u1 "$setup_mode_file/data"`
-
- if [ $secureboot_mode -eq 1 ] && [ $setup_mode -eq 0 ]; then
- log_info "secure boot mode enabled (CONFIG_EFI_VARS)"
- return 1;
- fi
- fi
- return 0;
-}
-
# On powerpc platform, check device-tree property
# /proc/device-tree/ibm,secureboot/os-secureboot-enforcing
# to detect secureboot state.
@@ -113,9 +87,8 @@ get_arch()
}
# Check efivar SecureBoot-$(the UUID) and SetupMode-$(the UUID).
-# The secure boot mode can be accessed either as the last integer
-# of "od -An -t u1 /sys/firmware/efi/efivars/SecureBoot-*" or from
-# "od -An -t u1 /sys/firmware/efi/vars/SecureBoot-*/data". The efi
+# The secure boot mode can be accessed as the last integer of
+# "od -An -t u1 /sys/firmware/efi/efivars/SecureBoot-*". The efi
# SetupMode can be similarly accessed.
# Return 1 for SecureBoot mode enabled and SetupMode mode disabled.
get_secureboot_mode()
@@ -129,11 +102,6 @@ get_secureboot_mode()
else
get_efivarfs_secureboot_mode
secureboot_mode=$?
- # fallback to using the efi_var files
- if [ $secureboot_mode -eq 0 ]; then
- get_efi_var_secureboot_mode
- secureboot_mode=$?
- fi
fi
if [ $secureboot_mode -eq 0 ]; then
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index b8f248018174..33a0dbd26bd3 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -53,6 +53,21 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
+/*
+ * gcc cpuid.h provides __cpuid_count() since v4.4.
+ * Clang/LLVM cpuid.h provides __cpuid_count() since v3.4.0.
+ *
+ * Provide local define for tests needing __cpuid_count() because
+ * selftests need to work in older environments that do not yet
+ * have __cpuid_count().
+ */
+#ifndef __cpuid_count
+#define __cpuid_count(level, count, a, b, c, d) \
+ __asm__ __volatile__ ("cpuid\n\t" \
+ : "=a" (a), "=b" (b), "=c" (c), "=d" (d) \
+ : "0" (level), "2" (count))
+#endif
+
/* define kselftest exit codes */
#define KSFT_PASS 0
#define KSFT_FAIL 1
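
For illustration, a tiny user of the fallback defined above, querying the x86 extended-features leaf (EAX=7, ECX=0):

#include <stdio.h>
#include "kselftest.h"

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	printf("CPUID.(EAX=7,ECX=0): ebx=%#x ecx=%#x edx=%#x\n",
	       ebx, ecx, edx);
	return 0;
}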
diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
index 00e60d6eb16b..708cb5429633 100755
--- a/tools/testing/selftests/kselftest_deps.sh
+++ b/tools/testing/selftests/kselftest_deps.sh
@@ -26,7 +26,7 @@ echo " main Makefile when optional -p is specified."
echo "- Prints pass/fail dependency check for each tests/sub-test."
echo "- Prints pass/fail targets and libraries."
echo "- Default: runs dependency checks on all tests."
-echo "- Optional test name can be specified to check dependencies for it."
+echo "- Optional: test name can be specified to check dependencies for it."
exit 1
}
diff --git a/tools/testing/selftests/kselftest_module.h b/tools/testing/selftests/kselftest_module.h
index e2ea41de3f35..63cd7487373f 100644
--- a/tools/testing/selftests/kselftest_module.h
+++ b/tools/testing/selftests/kselftest_module.h
@@ -3,6 +3,7 @@
#define __KSELFTEST_MODULE_H
#include <linux/module.h>
+#include <linux/panic.h>
/*
* Test framework for writing test modules to be loaded by kselftest.
@@ -41,6 +42,7 @@ static inline int kstm_report(unsigned int total_tests, unsigned int failed_test
static int __init __module##_init(void) \
{ \
pr_info("loaded.\n"); \
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK); \
selftest(); \
return kstm_report(total_tests, failed_tests, skipped_tests); \
} \
@@ -51,4 +53,6 @@ static void __exit __module##_exit(void) \
module_init(__module##_init); \
module_exit(__module##_exit)
+MODULE_INFO(test, "Y");
+
#endif /* __KSELFTEST_MODULE_H */
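
For reference, a minimal test module built on this header; the KSTM_* macros come from kselftest_module.h and the single check is a placeholder. With the changes above it now also taints the kernel with TAINT_TEST on load:

// SPDX-License-Identifier: GPL-2.0+
#include "../tools/testing/selftests/kselftest_module.h"

KSTM_MODULE_GLOBALS();

static void __init selftest(void)
{
	KSTM_CHECK_ZERO(0);	/* counts as one passing test */
}

KSTM_MODULE_LOADERS(test_demo);
MODULE_LICENSE("GPL");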
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 0b0e4402bba6..d625a3f83780 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -2,7 +2,8 @@
/aarch64/arch_timer
/aarch64/debug-exceptions
/aarch64/get-reg-list
-/aarch64/psci_cpu_on_test
+/aarch64/hypercalls
+/aarch64/psci_test
/aarch64/vcpu_width_config
/aarch64/vgic_init
/aarch64/vgic_irq
@@ -16,6 +17,7 @@
/x86_64/debug_regs
/x86_64/evmcs_test
/x86_64/emulator_error_test
+/x86_64/fix_hypercall_test
/x86_64/get_msr_index_features
/x86_64/kvm_clock_test
/x86_64/kvm_pv_test
@@ -23,8 +25,10 @@
/x86_64/hyperv_cpuid
/x86_64/hyperv_features
/x86_64/hyperv_svm_test
+/x86_64/max_vcpuid_cap_test
/x86_64/mmio_warning_test
-/x86_64/mmu_role_test
+/x86_64/monitor_mwait_test
+/x86_64/nx_huge_pages_test
/x86_64/platform_info_test
/x86_64/pmu_event_filter_test
/x86_64/set_boot_cpu_id
@@ -34,9 +38,11 @@
/x86_64/state_test
/x86_64/svm_vmcall_test
/x86_64/svm_int_ctl_test
-/x86_64/tsc_scaling_sync
+/x86_64/svm_nested_soft_inject_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
+/x86_64/tsc_scaling_sync
+/x86_64/ucna_injection_test
/x86_64/userspace_io_test
/x86_64/userspace_msr_exit_test
/x86_64/vmx_apic_access_test
@@ -44,6 +50,7 @@
/x86_64/vmx_dirty_log_test
/x86_64/vmx_exception_with_invalid_guest_state
/x86_64/vmx_invalid_nested_guest_state
+/x86_64/vmx_msrs_test
/x86_64/vmx_preemption_timer_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
@@ -53,7 +60,8 @@
/x86_64/xen_shinfo_test
/x86_64/xen_vmcall_test
/x86_64/xss_msr_test
-/x86_64/vmx_pmu_msrs_test
+/x86_64/vmx_pmu_caps_test
+/x86_64/triple_fault_event_test
/access_tracking_perf_test
/demand_paging_test
/dirty_log_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 681b173aa87c..c7f47429d6cd 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -4,7 +4,6 @@ include ../../../build/Build.include
all:
top_srcdir = ../../../..
-KSFT_KHDR_INSTALL := 1
# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
# directories and targets in this Makefile. "uname -m" doesn't map to
@@ -37,17 +36,49 @@ ifeq ($(ARCH),riscv)
UNAME_M := riscv
endif
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
-LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
-LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c
-LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
-LIBKVM_riscv = lib/riscv/processor.c lib/riscv/ucall.c
-
+LIBKVM += lib/assert.c
+LIBKVM += lib/elf.c
+LIBKVM += lib/guest_modes.c
+LIBKVM += lib/io.c
+LIBKVM += lib/kvm_util.c
+LIBKVM += lib/perf_test_util.c
+LIBKVM += lib/rbtree.c
+LIBKVM += lib/sparsebit.c
+LIBKVM += lib/test_util.c
+
+LIBKVM_x86_64 += lib/x86_64/apic.c
+LIBKVM_x86_64 += lib/x86_64/handlers.S
+LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
+LIBKVM_x86_64 += lib/x86_64/processor.c
+LIBKVM_x86_64 += lib/x86_64/svm.c
+LIBKVM_x86_64 += lib/x86_64/ucall.c
+LIBKVM_x86_64 += lib/x86_64/vmx.c
+
+LIBKVM_aarch64 += lib/aarch64/gic.c
+LIBKVM_aarch64 += lib/aarch64/gic_v3.c
+LIBKVM_aarch64 += lib/aarch64/handlers.S
+LIBKVM_aarch64 += lib/aarch64/processor.c
+LIBKVM_aarch64 += lib/aarch64/spinlock.c
+LIBKVM_aarch64 += lib/aarch64/ucall.c
+LIBKVM_aarch64 += lib/aarch64/vgic.c
+
+LIBKVM_s390x += lib/s390x/diag318_test_handler.c
+LIBKVM_s390x += lib/s390x/processor.c
+LIBKVM_s390x += lib/s390x/ucall.c
+
+LIBKVM_riscv += lib/riscv/processor.c
+LIBKVM_riscv += lib/riscv/ucall.c
+
+# Non-compiled test targets
+TEST_PROGS_x86_64 += x86_64/nx_huge_pages_test.sh
+
+# Compiled test targets
TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/emulator_error_test
+TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
@@ -55,7 +86,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
-TEST_GEN_PROGS_x86_64 += x86_64/mmu_role_test
+TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
@@ -65,13 +96,17 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_soft_inject_test
+TEST_GEN_PROGS_x86_64 += x86_64/tsc_scaling_sync
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/ucna_injection_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_exception_with_invalid_guest_state
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
@@ -81,11 +116,13 @@ TEST_GEN_PROGS_x86_64 += x86_64/xapic_state_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_caps_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
+TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
+TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
@@ -102,10 +139,14 @@ TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
TEST_GEN_PROGS_x86_64 += system_counter_offset_test
+# Compiled outputs used by test targets
+TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
+
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
-TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
+TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
+TEST_GEN_PROGS_aarch64 += aarch64/psci_test
TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
@@ -140,7 +181,9 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += kvm_binary_stats_test
+TEST_PROGS += $(TEST_PROGS_$(UNAME_M))
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
+TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))
INSTALL_HDR_PATH = $(top_srcdir)/usr
@@ -170,12 +213,13 @@ LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
include ../lib.mk
-STATIC_LIBS := $(OUTPUT)/libkvm.a
LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
-EXTRA_CLEAN += $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(STATIC_LIBS) cscope.*
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
+
+EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
@@ -184,13 +228,9 @@ $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
-LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
-$(OUTPUT)/libkvm.a: $(LIBKVM_OBJS)
- $(AR) crs $@ $^
-
x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
-all: $(STATIC_LIBS)
-$(TEST_GEN_PROGS): $(STATIC_LIBS)
+$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
+$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
cscope:
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index 3b940a101bc0..574eb73f0e90 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -76,13 +76,8 @@ struct test_vcpu_shared_data {
uint64_t xcnt;
};
-struct test_vcpu {
- uint32_t vcpuid;
- pthread_t pt_vcpu_run;
- struct kvm_vm *vm;
-};
-
-static struct test_vcpu test_vcpu[KVM_MAX_VCPUS];
+static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+static pthread_t pt_vcpu_run[KVM_MAX_VCPUS];
static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
static int vtimer_irq, ptimer_irq;
@@ -217,29 +212,32 @@ static void guest_code(void)
static void *test_vcpu_run(void *arg)
{
+ unsigned int vcpu_idx = (unsigned long)arg;
struct ucall uc;
- struct test_vcpu *vcpu = arg;
+ struct kvm_vcpu *vcpu = vcpus[vcpu_idx];
struct kvm_vm *vm = vcpu->vm;
- uint32_t vcpuid = vcpu->vcpuid;
- struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpuid];
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpu_idx];
- vcpu_run(vm, vcpuid);
+ vcpu_run(vcpu);
/* Currently, any exit from guest is an indication of completion */
pthread_mutex_lock(&vcpu_done_map_lock);
- set_bit(vcpuid, vcpu_done_map);
+ set_bit(vcpu_idx, vcpu_done_map);
pthread_mutex_unlock(&vcpu_done_map_lock);
- switch (get_ucall(vm, vcpuid, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
sync_global_from_guest(vm, *shared_data);
- TEST_FAIL("%s at %s:%ld\n\tvalues: %lu, %lu; %lu, vcpu: %u; stage: %u; iter: %u",
- (const char *)uc.args[0], __FILE__, uc.args[1],
- uc.args[2], uc.args[3], uc.args[4], vcpuid,
- shared_data->guest_stage, shared_data->nr_iter);
+ REPORT_GUEST_ASSERT_N(uc, "values: %lu, %lu; %lu, vcpu: %u; stage: %u; iter: %u",
+ GUEST_ASSERT_ARG(uc, 0),
+ GUEST_ASSERT_ARG(uc, 1),
+ GUEST_ASSERT_ARG(uc, 2),
+ vcpu_idx,
+ shared_data->guest_stage,
+ shared_data->nr_iter);
break;
default:
TEST_FAIL("Unexpected guest exit\n");
@@ -265,7 +263,7 @@ static uint32_t test_get_pcpu(void)
return pcpu;
}
-static int test_migrate_vcpu(struct test_vcpu *vcpu)
+static int test_migrate_vcpu(unsigned int vcpu_idx)
{
int ret;
cpu_set_t cpuset;
@@ -274,15 +272,15 @@ static int test_migrate_vcpu(struct test_vcpu *vcpu)
CPU_ZERO(&cpuset);
CPU_SET(new_pcpu, &cpuset);
- pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu->vcpuid, new_pcpu);
+ pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu_idx, new_pcpu);
- ret = pthread_setaffinity_np(vcpu->pt_vcpu_run,
- sizeof(cpuset), &cpuset);
+ ret = pthread_setaffinity_np(pt_vcpu_run[vcpu_idx],
+ sizeof(cpuset), &cpuset);
/* Allow the error where the vCPU thread is already finished */
TEST_ASSERT(ret == 0 || ret == ESRCH,
- "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
- vcpu->vcpuid, new_pcpu, ret);
+ "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
+ vcpu_idx, new_pcpu, ret);
return ret;
}
@@ -305,7 +303,7 @@ static void *test_vcpu_migration(void *arg)
continue;
}
- test_migrate_vcpu(&test_vcpu[i]);
+ test_migrate_vcpu(i);
}
} while (test_args.nr_vcpus != n_done);
@@ -314,16 +312,17 @@ static void *test_vcpu_migration(void *arg)
static void test_run(struct kvm_vm *vm)
{
- int i, ret;
pthread_t pt_vcpu_migration;
+ unsigned int i;
+ int ret;
pthread_mutex_init(&vcpu_done_map_lock, NULL);
vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");
- for (i = 0; i < test_args.nr_vcpus; i++) {
- ret = pthread_create(&test_vcpu[i].pt_vcpu_run, NULL,
- test_vcpu_run, &test_vcpu[i]);
+ for (i = 0; i < (unsigned long)test_args.nr_vcpus; i++) {
+ ret = pthread_create(&pt_vcpu_run[i], NULL, test_vcpu_run,
+ (void *)(unsigned long)i);
TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
}
@@ -338,7 +337,7 @@ static void test_run(struct kvm_vm *vm)
for (i = 0; i < test_args.nr_vcpus; i++)
- pthread_join(test_vcpu[i].pt_vcpu_run, NULL);
+ pthread_join(pt_vcpu_run[i], NULL);
if (test_args.migration_freq_ms)
pthread_join(pt_vcpu_migration, NULL);
@@ -349,12 +348,10 @@ static void test_run(struct kvm_vm *vm)
static void test_init_timer_irq(struct kvm_vm *vm)
{
/* Timer initid should be same for all the vCPUs, so query only vCPU-0 */
- int vcpu0_fd = vcpu_get_fd(vm, 0);
-
- kvm_device_access(vcpu0_fd, KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq, false);
- kvm_device_access(vcpu0_fd, KVM_ARM_VCPU_TIMER_CTRL,
- KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq, false);
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq);
+ vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
sync_global_to_guest(vm, ptimer_irq);
sync_global_to_guest(vm, vtimer_irq);
@@ -370,25 +367,18 @@ static struct kvm_vm *test_vm_create(void)
unsigned int i;
int nr_vcpus = test_args.nr_vcpus;
- vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
+ vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
vm_init_descriptor_tables(vm);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
- for (i = 0; i < nr_vcpus; i++) {
- vcpu_init_descriptor_tables(vm, i);
-
- test_vcpu[i].vcpuid = i;
- test_vcpu[i].vm = vm;
- }
+ for (i = 0; i < nr_vcpus; i++)
+ vcpu_init_descriptor_tables(vcpus[i]);
ucall_init(vm, NULL);
test_init_timer_irq(vm);
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
- if (gic_fd < 0) {
- print_skip("Failed to create vgic-v3");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3");
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
@@ -478,10 +468,8 @@ int main(int argc, char *argv[])
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
- if (test_args.migration_freq_ms && get_nprocs() < 2) {
- print_skip("At least two physical CPUs needed for vCPU migration");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(!test_args.migration_freq_ms || get_nprocs() >= 2,
+ "At least two physical CPUs needed for vCPU migration");
vm = test_vm_create();
test_run(vm);
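
The arch_timer rework above drops the per-vCPU wrapper struct and instead passes the vCPU index through the pthread argument as a cast integer. That idiom in isolation (NR_THREADS and worker() are stand-ins):

#include <pthread.h>
#include <stdio.h>

#define NR_THREADS 4

static void *worker(void *arg)
{
	unsigned int idx = (unsigned long)arg;	/* recover the index */

	printf("thread %u running\n", idx);
	return NULL;
}

int main(void)
{
	pthread_t threads[NR_THREADS];
	unsigned long i;

	for (i = 0; i < NR_THREADS; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);
	for (i = 0; i < NR_THREADS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}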
diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
index 63b2178210c4..2ee35cf9801e 100644
--- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
+++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
@@ -3,8 +3,6 @@
#include <kvm_util.h>
#include <processor.h>
-#define VCPU_ID 0
-
#define MDSCR_KDE (1 << 13)
#define MDSCR_MDE (1 << 15)
#define MDSCR_SS (1 << 0)
@@ -240,31 +238,29 @@ static void guest_svc_handler(struct ex_regs *regs)
svc_addr = regs->pc;
}
-static int debug_version(struct kvm_vm *vm)
+static int debug_version(struct kvm_vcpu *vcpu)
{
uint64_t id_aa64dfr0;
- get_reg(vm, VCPU_ID, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
return id_aa64dfr0 & 0xf;
}
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct ucall uc;
int stage;
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
- if (debug_version(vm) < 6) {
- print_skip("Armv8 debug architecture not supported.");
- kvm_vm_free(vm);
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(debug_version(vcpu) >= 6,
+ "Armv8 debug architecture not supported.");
vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
ESR_EC_BRK_INS, guest_sw_bp_handler);
@@ -278,18 +274,16 @@ int main(int argc, char *argv[])
ESR_EC_SVC64, guest_svc_handler);
for (stage = 0; stage < 11; stage++) {
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == stage,
"Stage %d: Unexpected sync ucall, got %lx",
stage, (ulong)uc.args[1]);
break;
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
- (const char *)uc.args[0],
- __FILE__, uc.args[1], uc.args[2], uc.args[3]);
+ REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
break;
case UCALL_DONE:
goto done;
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index 0b571f3fe64c..d287dd2cac0a 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -294,6 +294,11 @@ static void print_reg(struct vcpu_config *c, __u64 id)
"%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
break;
+ case KVM_REG_ARM_FW_FEAT_BMAP:
+ TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
+ "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id);
+ printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
+ break;
case KVM_REG_ARM64_SVE:
if (has_cap(c, KVM_CAP_ARM_SVE))
printf("\t%s,\n", sve_id_to_str(c, id));
@@ -372,7 +377,7 @@ static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
init->features[s->feature / 32] |= 1 << (s->feature % 32);
}
-static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config *c)
+static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
{
struct reg_sublist *s;
int feature;
@@ -380,7 +385,7 @@ static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config
for_each_sublist(c, s) {
if (s->finalize) {
feature = s->feature;
- vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
+ vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
}
}
}
@@ -390,10 +395,12 @@ static void check_supported(struct vcpu_config *c)
struct reg_sublist *s;
for_each_sublist(c, s) {
- if (s->capability && !kvm_check_cap(s->capability)) {
- fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name);
- exit(KSFT_SKIP);
- }
+ if (!s->capability)
+ continue;
+
+ __TEST_REQUIRE(kvm_has_cap(s->capability),
+ "%s: %s not available, skipping tests\n",
+ config_name(c), s->name);
}
}
@@ -406,17 +413,19 @@ static void run_test(struct vcpu_config *c)
struct kvm_vcpu_init init = { .target = -1, };
int new_regs = 0, missing_regs = 0, i, n;
int failed_get = 0, failed_set = 0, failed_reject = 0;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct reg_sublist *s;
check_supported(c);
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ vm = vm_create_barebones();
prepare_vcpu_init(c, &init);
- aarch64_vcpu_add_default(vm, 0, &init, NULL);
- finalize_vcpu(vm, 0, c);
+ vcpu = __vm_vcpu_add(vm, 0);
+ aarch64_vcpu_setup(vcpu, &init);
+ finalize_vcpu(vcpu, c);
- reg_list = vcpu_get_reg_list(vm, 0);
+ reg_list = vcpu_get_reg_list(vcpu);
if (fixup_core_regs)
core_reg_fixup();
@@ -452,7 +461,7 @@ static void run_test(struct vcpu_config *c)
bool reject_reg = false;
int ret;
- ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
+ ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
if (ret) {
printf("%s: Failed to get ", config_name(c));
print_reg(c, reg.id);
@@ -464,7 +473,7 @@ static void run_test(struct vcpu_config *c)
for_each_sublist(c, s) {
if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
reject_reg = true;
- ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
if (ret != -1 || errno != EPERM) {
printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
print_reg(c, reg.id);
@@ -476,7 +485,7 @@ static void run_test(struct vcpu_config *c)
}
if (!reject_reg) {
- ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
if (ret) {
printf("%s: Failed to set ", config_name(c));
print_reg(c, reg.id);
@@ -692,6 +701,9 @@ static __u64 base_regs[] = {
KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
+ KVM_REG_ARM_FW_FEAT_BMAP_REG(0), /* KVM_REG_ARM_STD_BMAP */
+ KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
+ KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
ARM64_SYS_REG(3, 3, 14, 0, 2),
diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c
new file mode 100644
index 000000000000..a39da3fe4952
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* hypercalls: Check the ARM64 pseudo-firmware bitmap register interface.
+ *
+ * The test validates the basic hypercall functionalities that are exposed
+ * via the pseudo-firmware bitmap register. This includes the registers'
+ * read/write behavior before and after the VM has started, and whether the
+ * hypercalls are properly masked or unmasked for the guest when disabled or
+ * enabled from KVM userspace, respectively.
+ */
+
+#include <errno.h>
+#include <linux/arm-smccc.h>
+#include <asm/kvm.h>
+#include <kvm_util.h>
+
+#include "processor.h"
+
+#define FW_REG_ULIMIT_VAL(max_feat_bit) (GENMASK(max_feat_bit, 0))
+
+/* Last valid bits of the bitmapped firmware registers */
+#define KVM_REG_ARM_STD_BMAP_BIT_MAX 0
+#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX 0
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 1
+
+struct kvm_fw_reg_info {
+ uint64_t reg; /* Register definition */
+ uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */
+};
+
+#define FW_REG_INFO(r) \
+ { \
+ .reg = r, \
+ .max_feat_bit = r##_BIT_MAX, \
+ }
+
+static const struct kvm_fw_reg_info fw_reg_info[] = {
+ FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
+ FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
+ FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
+};
+
+enum test_stage {
+ TEST_STAGE_REG_IFACE,
+ TEST_STAGE_HVC_IFACE_FEAT_DISABLED,
+ TEST_STAGE_HVC_IFACE_FEAT_ENABLED,
+ TEST_STAGE_HVC_IFACE_FALSE_INFO,
+ TEST_STAGE_END,
+};
+
+static int stage = TEST_STAGE_REG_IFACE;
+
+struct test_hvc_info {
+ uint32_t func_id;
+ uint64_t arg1;
+};
+
+#define TEST_HVC_INFO(f, a1) \
+ { \
+ .func_id = f, \
+ .arg1 = a1, \
+ }
+
+static const struct test_hvc_info hvc_info[] = {
+ /* KVM_REG_ARM_STD_BMAP */
+ TEST_HVC_INFO(ARM_SMCCC_TRNG_VERSION, 0),
+ TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_TRNG_RND64),
+ TEST_HVC_INFO(ARM_SMCCC_TRNG_GET_UUID, 0),
+ TEST_HVC_INFO(ARM_SMCCC_TRNG_RND32, 0),
+ TEST_HVC_INFO(ARM_SMCCC_TRNG_RND64, 0),
+
+ /* KVM_REG_ARM_STD_HYP_BMAP */
+ TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_HV_PV_TIME_FEATURES),
+ TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST),
+ TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_ST, 0),
+
+ /* KVM_REG_ARM_VENDOR_HYP_BMAP */
+ TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID,
+ ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
+ TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, 0),
+ TEST_HVC_INFO(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, KVM_PTP_VIRT_COUNTER),
+};
+
+/* Feed false hypercall info to test the KVM behavior */
+static const struct test_hvc_info false_hvc_info[] = {
+ /* Feature support check against a different family of hypercalls */
+ TEST_HVC_INFO(ARM_SMCCC_TRNG_FEATURES, ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID),
+ TEST_HVC_INFO(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, ARM_SMCCC_TRNG_RND64),
+ TEST_HVC_INFO(ARM_SMCCC_HV_PV_TIME_FEATURES, ARM_SMCCC_TRNG_RND64),
+};
+
+static void guest_test_hvc(const struct test_hvc_info *hc_info)
+{
+ unsigned int i;
+ struct arm_smccc_res res;
+ unsigned int hvc_info_arr_sz;
+
+ hvc_info_arr_sz =
+ hc_info == hvc_info ? ARRAY_SIZE(hvc_info) : ARRAY_SIZE(false_hvc_info);
+
+ for (i = 0; i < hvc_info_arr_sz; i++, hc_info++) {
+ memset(&res, 0, sizeof(res));
+ smccc_hvc(hc_info->func_id, hc_info->arg1, 0, 0, 0, 0, 0, 0, &res);
+
+ switch (stage) {
+ case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
+ case TEST_STAGE_HVC_IFACE_FALSE_INFO:
+ GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED,
+ res.a0, hc_info->func_id, hc_info->arg1);
+ break;
+ case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
+ GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED,
+ res.a0, hc_info->func_id, hc_info->arg1);
+ break;
+ default:
+ GUEST_ASSERT_1(0, stage);
+ }
+ }
+}
+
+static void guest_code(void)
+{
+ while (stage != TEST_STAGE_END) {
+ switch (stage) {
+ case TEST_STAGE_REG_IFACE:
+ break;
+ case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
+ case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
+ guest_test_hvc(hvc_info);
+ break;
+ case TEST_STAGE_HVC_IFACE_FALSE_INFO:
+ guest_test_hvc(false_hvc_info);
+ break;
+ default:
+ GUEST_ASSERT_1(0, stage);
+ }
+
+ GUEST_SYNC(stage);
+ }
+
+ GUEST_DONE();
+}
+
+struct st_time {
+ uint32_t rev;
+ uint32_t attr;
+ uint64_t st_time;
+};
+
+#define STEAL_TIME_SIZE ((sizeof(struct st_time) + 63) & ~63)
+#define ST_GPA_BASE (1 << 30)
+
+static void steal_time_init(struct kvm_vcpu *vcpu)
+{
+ uint64_t st_ipa = (ulong)ST_GPA_BASE;
+ unsigned int gpages;
+
+ gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE);
+ vm_userspace_mem_region_add(vcpu->vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
+
+ vcpu_device_attr_set(vcpu, KVM_ARM_VCPU_PVTIME_CTRL,
+ KVM_ARM_VCPU_PVTIME_IPA, &st_ipa);
+}
+
+static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
+{
+ uint64_t val;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
+ const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
+
+ /* First 'read' should be an upper limit of the features supported */
+ vcpu_get_reg(vcpu, reg_info->reg, &val);
+ TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
+ "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx\n",
+ reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
+
+ /* Test a 'write' by disabling all the features of the register map */
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
+ TEST_ASSERT(ret == 0,
+ "Failed to clear all the features of reg: 0x%lx; ret: %d\n",
+ reg_info->reg, errno);
+
+ vcpu_get_reg(vcpu, reg_info->reg, &val);
+ TEST_ASSERT(val == 0,
+ "Expected all the features to be cleared for reg: 0x%lx\n", reg_info->reg);
+
+ /*
+ * Test enabling a feature that's not supported.
+ * Avoid this check if all the bits are occupied.
+ */
+ if (reg_info->max_feat_bit < 63) {
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, BIT(reg_info->max_feat_bit + 1));
+ TEST_ASSERT(ret != 0 && errno == EINVAL,
+ "Unexpected behavior or return value (%d) while setting an unsupported feature for reg: 0x%lx\n",
+ errno, reg_info->reg);
+ }
+ }
+}
+
+static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
+{
+ uint64_t val;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
+ const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
+
+ /*
+ * Before starting the VM, the test clears all the bits.
+ * Check if that's still the case.
+ */
+ vcpu_get_reg(vcpu, reg_info->reg, &val);
+ TEST_ASSERT(val == 0,
+ "Expected all the features to be cleared for reg: 0x%lx\n",
+ reg_info->reg);
+
+ /*
+ * Since the VM has run at least once, KVM shouldn't allow modification of
+ * the registers and should return EBUSY. Set the registers and check for
+ * the expected errno.
+ */
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit));
+ TEST_ASSERT(ret != 0 && errno == EBUSY,
+ "Unexpected behavior or return value (%d) while setting a feature while VM is running for reg: 0x%lx\n",
+ errno, reg_info->reg);
+ }
+}
+
+static struct kvm_vm *test_vm_create(struct kvm_vcpu **vcpu)
+{
+ struct kvm_vm *vm;
+
+ vm = vm_create_with_one_vcpu(vcpu, guest_code);
+
+ ucall_init(vm, NULL);
+ steal_time_init(*vcpu);
+
+ return vm;
+}
+
+static void test_guest_stage(struct kvm_vm **vm, struct kvm_vcpu **vcpu)
+{
+ int prev_stage = stage;
+
+ pr_debug("Stage: %d\n", prev_stage);
+
+ /* Sync the stage early, the VM might be freed below. */
+ stage++;
+ sync_global_to_guest(*vm, stage);
+
+ switch (prev_stage) {
+ case TEST_STAGE_REG_IFACE:
+ test_fw_regs_after_vm_start(*vcpu);
+ break;
+ case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
+ /* Start a new VM so that all the features are now enabled by default */
+ kvm_vm_free(*vm);
+ *vm = test_vm_create(vcpu);
+ break;
+ case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
+ case TEST_STAGE_HVC_IFACE_FALSE_INFO:
+ break;
+ default:
+ TEST_FAIL("Unknown test stage: %d\n", prev_stage);
+ }
+}
+
+static void test_run(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ bool guest_done = false;
+
+ vm = test_vm_create(&vcpu);
+
+ test_fw_regs_before_vm_start(vcpu);
+
+ while (!guest_done) {
+ vcpu_run(vcpu);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ test_guest_stage(&vm, &vcpu);
+ break;
+ case UCALL_DONE:
+ guest_done = true;
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT_N(uc, "values: 0x%lx, 0x%lx; 0x%lx, stage: %u",
+ GUEST_ASSERT_ARG(uc, 0),
+ GUEST_ASSERT_ARG(uc, 1),
+ GUEST_ASSERT_ARG(uc, 2), stage);
+ break;
+ default:
+ TEST_FAIL("Unexpected guest exit\n");
+ }
+ }
+
+ kvm_vm_free(vm);
+}
+
+int main(void)
+{
+ setbuf(stdout, NULL);
+
+ test_run();
+ return 0;
+}
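
The FW_REG_ULIMIT_VAL() checks in the new test reduce to simple bitmap arithmetic: GENMASK(max_feat_bit, 0) is the value a first read of a pseudo-firmware register is expected to return. A standalone sketch, where the GENMASK definition is a userspace stand-in for the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) \
	(((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

int main(void)
{
	/* KVM_REG_ARM_VENDOR_HYP_BMAP has max_feat_bit == 1 */
	uint64_t ulimit = GENMASK(1, 0);

	printf("expected first read: %#llx\n",
	       (unsigned long long)ulimit);	/* prints 0x3 */
	return 0;
}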
diff --git a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c b/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c
deleted file mode 100644
index 4c5f6814030f..000000000000
--- a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the
- * CPU_ON PSCI call matches what the caller requested.
- *
- * Copyright (c) 2021 Google LLC.
- *
- * This is a regression test for a race between KVM servicing the PSCI call and
- * userspace reading the vCPUs registers.
- */
-
-#define _GNU_SOURCE
-
-#include <linux/psci.h>
-
-#include "kvm_util.h"
-#include "processor.h"
-#include "test_util.h"
-
-#define VCPU_ID_SOURCE 0
-#define VCPU_ID_TARGET 1
-
-#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
-#define CPU_ON_CONTEXT_ID 0xdeadc0deul
-
-static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
- uint64_t context_id)
-{
- register uint64_t x0 asm("x0") = PSCI_0_2_FN64_CPU_ON;
- register uint64_t x1 asm("x1") = target_cpu;
- register uint64_t x2 asm("x2") = entry_addr;
- register uint64_t x3 asm("x3") = context_id;
-
- asm("hvc #0"
- : "=r"(x0)
- : "r"(x0), "r"(x1), "r"(x2), "r"(x3)
- : "memory");
-
- return x0;
-}
-
-static uint64_t psci_affinity_info(uint64_t target_affinity,
- uint64_t lowest_affinity_level)
-{
- register uint64_t x0 asm("x0") = PSCI_0_2_FN64_AFFINITY_INFO;
- register uint64_t x1 asm("x1") = target_affinity;
- register uint64_t x2 asm("x2") = lowest_affinity_level;
-
- asm("hvc #0"
- : "=r"(x0)
- : "r"(x0), "r"(x1), "r"(x2)
- : "memory");
-
- return x0;
-}
-
-static void guest_main(uint64_t target_cpu)
-{
- GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));
- uint64_t target_state;
-
- do {
- target_state = psci_affinity_info(target_cpu, 0);
-
- GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
- (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
- } while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);
-
- GUEST_DONE();
-}
-
-int main(void)
-{
- uint64_t target_mpidr, obs_pc, obs_x0;
- struct kvm_vcpu_init init;
- struct kvm_vm *vm;
- struct ucall uc;
-
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name);
- ucall_init(vm, NULL);
-
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
- init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
-
- aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_main);
-
- /*
- * make sure the target is already off when executing the test.
- */
- init.features[0] |= (1 << KVM_ARM_VCPU_POWER_OFF);
- aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_main);
-
- get_reg(vm, VCPU_ID_TARGET, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
- vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK);
- vcpu_run(vm, VCPU_ID_SOURCE);
-
- switch (get_ucall(vm, VCPU_ID_SOURCE, &uc)) {
- case UCALL_DONE:
- break;
- case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__,
- uc.args[1]);
- break;
- default:
- TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
- }
-
- get_reg(vm, VCPU_ID_TARGET, ARM64_CORE_REG(regs.pc), &obs_pc);
- get_reg(vm, VCPU_ID_TARGET, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
-
- TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
- "unexpected target cpu pc: %lx (expected: %lx)",
- obs_pc, CPU_ON_ENTRY_ADDR);
- TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
- "unexpected target context id: %lx (expected: %lx)",
- obs_x0, CPU_ON_CONTEXT_ID);
-
- kvm_vm_free(vm);
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/aarch64/psci_test.c b/tools/testing/selftests/kvm/aarch64/psci_test.c
new file mode 100644
index 000000000000..f7621f6e938e
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/psci_test.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * psci_cpu_on_test - Test that the observable state of a vCPU targeted by the
+ * CPU_ON PSCI call matches what the caller requested.
+ *
+ * Copyright (c) 2021 Google LLC.
+ *
+ * This is a regression test for a race between KVM servicing the PSCI call and
+ * userspace reading the vCPUs registers.
+ */
+
+#define _GNU_SOURCE
+
+#include <linux/psci.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
+#define CPU_ON_CONTEXT_ID 0xdeadc0deul
+
+static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
+ uint64_t context_id)
+{
+ struct arm_smccc_res res;
+
+ smccc_hvc(PSCI_0_2_FN64_CPU_ON, target_cpu, entry_addr, context_id,
+ 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static uint64_t psci_affinity_info(uint64_t target_affinity,
+ uint64_t lowest_affinity_level)
+{
+ struct arm_smccc_res res;
+
+ smccc_hvc(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity, lowest_affinity_level,
+ 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static uint64_t psci_system_suspend(uint64_t entry_addr, uint64_t context_id)
+{
+ struct arm_smccc_res res;
+
+ smccc_hvc(PSCI_1_0_FN64_SYSTEM_SUSPEND, entry_addr, context_id,
+ 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static uint64_t psci_features(uint32_t func_id)
+{
+ struct arm_smccc_res res;
+
+ smccc_hvc(PSCI_1_0_FN_PSCI_FEATURES, func_id, 0, 0, 0, 0, 0, 0, &res);
+
+ return res.a0;
+}
+
+static void vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mp_state mp_state = {
+ .mp_state = KVM_MP_STATE_STOPPED,
+ };
+
+ vcpu_mp_state_set(vcpu, &mp_state);
+}
+
+static struct kvm_vm *setup_vm(void *guest_code, struct kvm_vcpu **source,
+ struct kvm_vcpu **target)
+{
+ struct kvm_vcpu_init init;
+ struct kvm_vm *vm;
+
+ vm = vm_create(2);
+ ucall_init(vm, NULL);
+
+ vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
+ init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
+
+ *source = aarch64_vcpu_add(vm, 0, &init, guest_code);
+ *target = aarch64_vcpu_add(vm, 1, &init, guest_code);
+
+ return vm;
+}
+
+static void enter_guest(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ vcpu_run(vcpu);
+ if (get_ucall(vcpu, &uc) == UCALL_ABORT)
+ REPORT_GUEST_ASSERT(uc);
+}
+
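+/*
+ * Verify that the target vCPU was reset to the entry point and context ID
+ * that the caller passed to CPU_ON.
+ */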
+static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ uint64_t obs_pc, obs_x0;
+
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
+
+ TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
+ "unexpected target cpu pc: %lx (expected: %lx)",
+ obs_pc, CPU_ON_ENTRY_ADDR);
+ TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
+ "unexpected target context id: %lx (expected: %lx)",
+ obs_x0, CPU_ON_CONTEXT_ID);
+}
+
+static void guest_test_cpu_on(uint64_t target_cpu)
+{
+ uint64_t target_state;
+
+ GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));
+
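+	/*
+	 * CPU_ON may complete asynchronously, so poll AFFINITY_INFO until the
+	 * target is reported as being on.
+	 */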
+ do {
+ target_state = psci_affinity_info(target_cpu, 0);
+
+ GUEST_ASSERT((target_state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
+ (target_state == PSCI_0_2_AFFINITY_LEVEL_OFF));
+ } while (target_state != PSCI_0_2_AFFINITY_LEVEL_ON);
+
+ GUEST_DONE();
+}
+
+static void host_test_cpu_on(void)
+{
+ struct kvm_vcpu *source, *target;
+ uint64_t target_mpidr;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ vm = setup_vm(guest_test_cpu_on, &source, &target);
+
+ /*
+	 * Make sure the target vCPU is powered off before the test runs.
+ */
+ vcpu_power_off(target);
+
+ vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
+ vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
+ enter_guest(source);
+
+ if (get_ucall(source, &uc) != UCALL_DONE)
+ TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
+
+ assert_vcpu_reset(target);
+ kvm_vm_free(vm);
+}
+
+static void guest_test_system_suspend(void)
+{
+ uint64_t ret;
+
+ /* assert that SYSTEM_SUSPEND is discoverable */
+ GUEST_ASSERT(!psci_features(PSCI_1_0_FN_SYSTEM_SUSPEND));
+ GUEST_ASSERT(!psci_features(PSCI_1_0_FN64_SYSTEM_SUSPEND));
+
+ ret = psci_system_suspend(CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID);
+ GUEST_SYNC(ret);
+}
+
+static void host_test_system_suspend(void)
+{
+ struct kvm_vcpu *source, *target;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+
+ vm = setup_vm(guest_test_system_suspend, &source, &target);
+ vm_enable_cap(vm, KVM_CAP_ARM_SYSTEM_SUSPEND, 0);
+
+ vcpu_power_off(target);
+ run = source->run;
+
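+	/*
+	 * The guest's suspend request should exit to userspace as a
+	 * KVM_SYSTEM_EVENT_SUSPEND system event instead of returning to the
+	 * guest.
+	 */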
+ enter_guest(source);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT,
+ "Unhandled exit reason: %u (%s)",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+ TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND,
+ "Unhandled system event: %u (expected: %u)",
+ run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND);
+
+ kvm_vm_free(vm);
+}
+
+int main(void)
+{
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SYSTEM_SUSPEND));
+
+ host_test_cpu_on();
+ host_test_system_suspend();
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
index 6e9402679229..80b74c6f152b 100644
--- a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
+++ b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
@@ -15,24 +15,25 @@
/*
- * Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, and then
- * add another vCPU, and run KVM_ARM_VCPU_INIT with @init2.
+ * Add a vCPU, run KVM_ARM_VCPU_INIT with @init0, and then
+ * add another vCPU, and run KVM_ARM_VCPU_INIT with @init1.
*/
-static int add_init_2vcpus(struct kvm_vcpu_init *init1,
- struct kvm_vcpu_init *init2)
+static int add_init_2vcpus(struct kvm_vcpu_init *init0,
+ struct kvm_vcpu_init *init1)
{
+ struct kvm_vcpu *vcpu0, *vcpu1;
struct kvm_vm *vm;
int ret;
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ vm = vm_create_barebones();
- vm_vcpu_add(vm, 0);
- ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+ vcpu0 = __vm_vcpu_add(vm, 0);
+ ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
if (ret)
goto free_exit;
- vm_vcpu_add(vm, 1);
- ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+ vcpu1 = __vm_vcpu_add(vm, 1);
+ ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
free_exit:
kvm_vm_free(vm);
@@ -40,25 +41,26 @@ free_exit:
}
/*
- * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
- * and run KVM_ARM_VCPU_INIT for another vCPU with @init2.
+ * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init0,
+ * and run KVM_ARM_VCPU_INIT for another vCPU with @init1.
*/
-static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
- struct kvm_vcpu_init *init2)
+static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init0,
+ struct kvm_vcpu_init *init1)
{
+ struct kvm_vcpu *vcpu0, *vcpu1;
struct kvm_vm *vm;
int ret;
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ vm = vm_create_barebones();
- vm_vcpu_add(vm, 0);
- vm_vcpu_add(vm, 1);
+ vcpu0 = __vm_vcpu_add(vm, 0);
+ vcpu1 = __vm_vcpu_add(vm, 1);
- ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+ ret = __vcpu_ioctl(vcpu0, KVM_ARM_VCPU_INIT, init0);
if (ret)
goto free_exit;
- ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+ ret = __vcpu_ioctl(vcpu1, KVM_ARM_VCPU_INIT, init1);
free_exit:
kvm_vm_free(vm);
@@ -76,45 +78,42 @@ free_exit:
*/
int main(void)
{
- struct kvm_vcpu_init init1, init2;
+ struct kvm_vcpu_init init0, init1;
struct kvm_vm *vm;
int ret;
- if (!kvm_check_cap(KVM_CAP_ARM_EL1_32BIT)) {
- print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL1_32BIT));
- /* Get the preferred target type and copy that to init2 for later use */
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
- vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
+ /* Get the preferred target type and copy that to init1 for later use */
+ vm = vm_create_barebones();
+ vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init0);
kvm_vm_free(vm);
- init2 = init1;
+ init1 = init0;
/* Test with 64bit vCPUs */
- ret = add_init_2vcpus(&init1, &init1);
+ ret = add_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 64bit EL1 vCPUs failed unexpectedly");
- ret = add_2vcpus_init_2vcpus(&init1, &init1);
+ ret = add_2vcpus_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 64bit EL1 vCPUs failed unexpectedly");
/* Test with 32bit vCPUs */
- init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
- ret = add_init_2vcpus(&init1, &init1);
+ init0.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+ ret = add_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 32bit EL1 vCPUs failed unexpectedly");
- ret = add_2vcpus_init_2vcpus(&init1, &init1);
+ ret = add_2vcpus_init_2vcpus(&init0, &init0);
TEST_ASSERT(ret == 0,
"Configuring 32bit EL1 vCPUs failed unexpectedly");
/* Test with mixed-width vCPUs */
- init1.features[0] = 0;
- init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
- ret = add_init_2vcpus(&init1, &init2);
+ init0.features[0] = 0;
+ init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+ ret = add_init_2vcpus(&init0, &init1);
TEST_ASSERT(ret != 0,
"Configuring mixed-width vCPUs worked unexpectedly");
- ret = add_2vcpus_init_2vcpus(&init1, &init2);
+ ret = add_2vcpus_init_2vcpus(&init0, &init1);
TEST_ASSERT(ret != 0,
"Configuring mixed-width vCPUs worked unexpectedly");
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
index 34379c98d2f4..e05ecb31823f 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
@@ -32,14 +32,28 @@ struct vm_gic {
static uint64_t max_phys_size;
-/* helper to access a redistributor register */
-static int access_v3_redist_reg(int gicv3_fd, int vcpu, int offset,
- uint32_t *val, bool write)
+/*
+ * Helpers to access a redistributor register and verify that the ioctl()
+ * failed or succeeded as expected, and that it provided the correct value on
+ * success.
+ */
+static void v3_redist_reg_get_errno(int gicv3_fd, int vcpu, int offset,
+ int want, const char *msg)
{
- uint64_t attr = REG_OFFSET(vcpu, offset);
+ uint32_t ignored_val;
+ int ret = __kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
+ REG_OFFSET(vcpu, offset), &ignored_val);
- return _kvm_device_access(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
- attr, val, write);
+ TEST_ASSERT(ret && errno == want, "%s; want errno = %d", msg, want);
+}
+
+static void v3_redist_reg_get(int gicv3_fd, int vcpu, int offset, uint32_t want,
+ const char *msg)
+{
+ uint32_t val;
+
+ kvm_device_attr_get(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
+ REG_OFFSET(vcpu, offset), &val);
+ TEST_ASSERT(val == want, "%s; want '0x%x', got '0x%x'", msg, want, val);
}
/* dummy guest code */
@@ -52,22 +66,22 @@ static void guest_code(void)
}
/* We don't want to assert on run execution, hence this helper */
-static int run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+static int run_vcpu(struct kvm_vcpu *vcpu)
{
- ucall_init(vm, NULL);
- int ret = _vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
- if (ret)
- return -errno;
- return 0;
+ ucall_init(vcpu->vm, NULL);
+
+ return __vcpu_run(vcpu) ? -errno : 0;
}
-static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, uint32_t nr_vcpus)
+static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
+ uint32_t nr_vcpus,
+ struct kvm_vcpu *vcpus[])
{
struct vm_gic v;
v.gic_dev_type = gic_dev_type;
- v.vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
- v.gic_fd = kvm_create_device(v.vm, gic_dev_type, false);
+ v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
return v;
}
@@ -129,63 +143,60 @@ static void subtest_dist_rdist(struct vm_gic *v)
: gic_v2_dist_region;
/* Check existing group/attributes */
- kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- dist.attr);
+ kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, dist.attr);
- kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr);
+ kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, rdist.attr);
	/* check non-existent attribute */
- ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, -1);
+ ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, -1);
TEST_ASSERT(ret && errno == ENXIO, "attribute not supported");
/* misaligned DIST and REDIST address settings */
addr = dist.alignment / 0x10;
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- dist.attr, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ dist.attr, &addr);
TEST_ASSERT(ret && errno == EINVAL, "GIC dist base not aligned");
addr = rdist.alignment / 0x10;
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ rdist.attr, &addr);
TEST_ASSERT(ret && errno == EINVAL, "GIC redist/cpu base not aligned");
/* out of range address */
addr = max_phys_size;
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- dist.attr, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ dist.attr, &addr);
TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit");
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ rdist.attr, &addr);
TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit");
	/* Space for half a rdist (a rdist is 2 * rdist.alignment). */
addr = max_phys_size - dist.alignment;
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ rdist.attr, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"half of the redist is beyond IPA limit");
	/* set REDIST base address @0x0 */
addr = 0x00000;
- kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr, true);
+ kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ rdist.attr, &addr);
/* Attempt to create a second legacy redistributor region */
addr = 0xE0000;
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- rdist.attr, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ rdist.attr, &addr);
TEST_ASSERT(ret && errno == EEXIST, "GIC redist base set again");
- ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST);
if (!ret) {
/* Attempt to mix legacy and new redistributor regions */
addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION,
- &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"attempt to mix GICv3 REDIST and REDIST_REGION");
}
@@ -195,8 +206,8 @@ static void subtest_dist_rdist(struct vm_gic *v)
* on first vcpu run instead.
*/
addr = rdist.size - rdist.alignment;
- kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- dist.attr, &addr, true);
+ kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ dist.attr, &addr);
}
/* Test the new REDIST region API */
@@ -205,71 +216,71 @@ static void subtest_v3_redist_regions(struct vm_gic *v)
uint64_t addr, expected_addr;
int ret;
- ret = kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST);
+ ret = __kvm_has_device_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST);
TEST_ASSERT(!ret, "Multiple redist regions advertised");
addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 2, 0);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with flags != 0");
addr = REDIST_REGION_ATTR_ADDR(0, 0x100000, 0, 0);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with count== 0");
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"attempt to register the first rdist region with index != 0");
addr = REDIST_REGION_ATTR_ADDR(2, 0x201000, 0, 1);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "rdist region with misaligned address");
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
- kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "register an rdist region with already used index");
addr = REDIST_REGION_ATTR_ADDR(1, 0x210000, 0, 2);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"register an rdist region overlapping with another one");
addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 2);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "register redist region with index not +1");
addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
- kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(1, max_phys_size, 0, 2);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"register redist region with base address beyond IPA range");
/* The last redist is above the pa range. */
addr = REDIST_REGION_ATTR_ADDR(2, max_phys_size - 0x30000, 0, 2);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"register redist region with top address beyond IPA range");
addr = 0x260000;
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"Mix KVM_VGIC_V3_ADDR_TYPE_REDIST and REDIST_REGION");
@@ -282,28 +293,28 @@ static void subtest_v3_redist_regions(struct vm_gic *v)
addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 0);
expected_addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false);
+ ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #0");
addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 1);
expected_addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false);
+ ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #1");
addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 2);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false);
+ ret = __kvm_device_attr_get(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
	TEST_ASSERT(ret && errno == ENOENT, "read characteristics of non-existent region");
addr = 0x260000;
- kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
+ kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &addr);
addr = REDIST_REGION_ATTR_ADDR(1, 0x260000, 0, 2);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "register redist region colliding with dist");
}
@@ -313,18 +324,19 @@ static void subtest_v3_redist_regions(struct vm_gic *v)
*/
static void test_vgic_then_vcpus(uint32_t gic_dev_type)
{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
int ret, i;
- v = vm_gic_create_with_vcpus(gic_dev_type, 1);
+ v = vm_gic_create_with_vcpus(gic_dev_type, 1, vcpus);
subtest_dist_rdist(&v);
/* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i)
- vm_vcpu_add_default(v.vm, i, guest_code);
+ vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
- ret = run_vcpu(v.vm, 3);
+ ret = run_vcpu(vcpus[3]);
TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
vm_gic_destroy(&v);
@@ -333,14 +345,15 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type)
/* All the VCPUs are created before the VGIC KVM device gets initialized */
static void test_vcpus_then_vgic(uint32_t gic_dev_type)
{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
int ret;
- v = vm_gic_create_with_vcpus(gic_dev_type, NR_VCPUS);
+ v = vm_gic_create_with_vcpus(gic_dev_type, NR_VCPUS, vcpus);
subtest_dist_rdist(&v);
- ret = run_vcpu(v.vm, 3);
+ ret = run_vcpu(vcpus[3]);
TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run");
vm_gic_destroy(&v);
@@ -348,52 +361,53 @@ static void test_vcpus_then_vgic(uint32_t gic_dev_type)
static void test_v3_new_redist_regions(void)
{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
void *dummy = NULL;
struct vm_gic v;
uint64_t addr;
int ret;
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
subtest_v3_redist_regions(&v);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
- ret = run_vcpu(v.vm, 3);
+ ret = run_vcpu(vcpus[3]);
	TEST_ASSERT(ret == -ENXIO, "running without a sufficient number of rdists");
vm_gic_destroy(&v);
/* step2 */
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
subtest_v3_redist_regions(&v);
addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- ret = run_vcpu(v.vm, 3);
+ ret = run_vcpu(vcpus[3]);
	TEST_ASSERT(ret == -EBUSY, "running without explicit vgic init");
vm_gic_destroy(&v);
/* step 3 */
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
subtest_v3_redist_regions(&v);
- _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy, true);
+ ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy);
TEST_ASSERT(ret && errno == EFAULT,
"register a third region allowing to cover the 4 vcpus");
addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
- ret = run_vcpu(v.vm, 3);
+ ret = run_vcpu(vcpus[3]);
TEST_ASSERT(!ret, "vcpu run");
vm_gic_destroy(&v);
@@ -403,71 +417,77 @@ static void test_v3_typer_accesses(void)
{
struct vm_gic v;
uint64_t addr;
- uint32_t val;
int ret, i;
- v.vm = vm_create_default(0, 0, guest_code);
+ v.vm = vm_create(NR_VCPUS);
+ (void)vm_vcpu_add(v.vm, 0, guest_code);
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
- vm_vcpu_add_default(v.vm, 3, guest_code);
+ (void)vm_vcpu_add(v.vm, 3, guest_code);
- ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
- TEST_ASSERT(ret && errno == EINVAL, "attempting to read GICR_TYPER of non created vcpu");
+ v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EINVAL,
+				"attempting to read GICR_TYPER of a not-yet-created vcpu");
- vm_vcpu_add_default(v.vm, 1, guest_code);
+ (void)vm_vcpu_add(v.vm, 1, guest_code);
- ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
- TEST_ASSERT(ret && errno == EBUSY, "read GICR_TYPER before GIC initialized");
+ v3_redist_reg_get_errno(v.gic_fd, 1, GICR_TYPER, EBUSY,
+ "read GICR_TYPER before GIC initialized");
- vm_vcpu_add_default(v.vm, 2, guest_code);
+ (void)vm_vcpu_add(v.vm, 2, guest_code);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
	for (i = 0; i < NR_VCPUS; i++) {
- ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && !val, "read GICR_TYPER before rdist region setting");
+ v3_redist_reg_get(v.gic_fd, i, GICR_TYPER, i * 0x100,
+ "read GICR_TYPER before rdist region setting");
}
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
	/* The first 2 rdists should be put there (vcpu 0 and 3) */
- ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && !val, "read typer of rdist #0");
-
- ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #1");
+ v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x0, "read typer of rdist #0");
+ v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x310, "read typer of rdist #1");
addr = REDIST_REGION_ATTR_ADDR(10, 0x100000, 0, 1);
- ret = _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
TEST_ASSERT(ret && errno == EINVAL, "collision with previous rdist region");
- ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x100,
- "no redist region attached to vcpu #1 yet, last cannot be returned");
-
- ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x200,
- "no redist region attached to vcpu #2, last cannot be returned");
+ v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100,
+ "no redist region attached to vcpu #1 yet, last cannot be returned");
+ v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x200,
+ "no redist region attached to vcpu #2, last cannot be returned");
addr = REDIST_REGION_ATTR_ADDR(10, 0x20000, 0, 1);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1");
-
- ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x210,
- "read typer of rdist #1, last properly returned");
+ v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #1");
+ v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x210,
+ "read typer of rdist #1, last properly returned");
vm_gic_destroy(&v);
}
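+/*
+ * Create a GICv3 VM, adding vCPUs in the (possibly out-of-order) sequence
+ * given by @vcpuids so that GICR_TYPER "Last" bit handling can be exercised.
+ */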
+static struct vm_gic vm_gic_v3_create_with_vcpuids(int nr_vcpus,
+ uint32_t vcpuids[])
+{
+ struct vm_gic v;
+ int i;
+
+ v.vm = vm_create(nr_vcpus);
+ for (i = 0; i < nr_vcpus; i++)
+ vm_vcpu_add(v.vm, vcpuids[i], guest_code);
+
+ v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3);
+
+ return v;
+}
+
/**
* Test GICR_TYPER last bit with new redist regions
* rdist regions #1 and #2 are contiguous
@@ -483,45 +503,30 @@ static void test_v3_last_bit_redist_regions(void)
uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
uint64_t addr;
- uint32_t val;
- int ret;
-
- v.vm = vm_create_default_with_vcpus(6, 0, 0, guest_code, vcpuids);
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
addr = REDIST_REGION_ATTR_ADDR(2, 0x100000, 0, 0);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(2, 0x240000, 0, 1);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 2);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
-
- ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0");
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr);
- ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1");
-
- ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x200, "read typer of rdist #2");
-
- ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #3");
-
- ret = access_v3_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #5");
-
- ret = access_v3_redist_reg(v.gic_fd, 4, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x410, "read typer of rdist #4");
+ v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x000, "read typer of rdist #0");
+ v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #1");
+ v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x200, "read typer of rdist #2");
+ v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x310, "read typer of rdist #3");
+ v3_redist_reg_get(v.gic_fd, 5, GICR_TYPER, 0x500, "read typer of rdist #5");
+ v3_redist_reg_get(v.gic_fd, 4, GICR_TYPER, 0x410, "read typer of rdist #4");
vm_gic_destroy(&v);
}
@@ -532,34 +537,21 @@ static void test_v3_last_bit_single_rdist(void)
uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
uint64_t addr;
- uint32_t val;
- int ret;
-
- v.vm = vm_create_default_with_vcpus(6, 0, 0, guest_code, vcpuids);
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ v = vm_gic_v3_create_with_vcpuids(ARRAY_SIZE(vcpuids), vcpuids);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
addr = 0x10000;
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
-
- ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0");
-
- ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x300, "read typer of rdist #1");
-
- ret = access_v3_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #2");
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
- ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #3");
-
- ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
- TEST_ASSERT(!ret && val == 0x210, "read typer of rdist #3");
+ v3_redist_reg_get(v.gic_fd, 0, GICR_TYPER, 0x000, "read typer of rdist #0");
+ v3_redist_reg_get(v.gic_fd, 3, GICR_TYPER, 0x300, "read typer of rdist #1");
+ v3_redist_reg_get(v.gic_fd, 5, GICR_TYPER, 0x500, "read typer of rdist #2");
+ v3_redist_reg_get(v.gic_fd, 1, GICR_TYPER, 0x100, "read typer of rdist #3");
+	v3_redist_reg_get(v.gic_fd, 2, GICR_TYPER, 0x210, "read typer of rdist #4");
vm_gic_destroy(&v);
}
@@ -567,30 +559,31 @@ static void test_v3_last_bit_single_rdist(void)
/* Uses the legacy REDIST region API. */
static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
int ret, i;
uint64_t addr;
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus);
	/* Set space for 3 redists; we have 1 vcpu, so this succeeds. */
addr = max_phys_size - (3 * 2 * 0x10000);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr);
addr = 0x00000;
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &addr);
/* Add the rest of the VCPUs */
for (i = 1; i < NR_VCPUS; ++i)
- vm_vcpu_add_default(v.vm, i, guest_code);
+ vcpus[i] = vm_vcpu_add(v.vm, i, guest_code);
- kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
/* Attempt to run a vcpu without enough redist space. */
- ret = run_vcpu(v.vm, 2);
+ ret = run_vcpu(vcpus[2]);
TEST_ASSERT(ret && errno == EINVAL,
"redist base+size above PA range detected on 1st vcpu run");
@@ -599,39 +592,40 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
static void test_v3_its_region(void)
{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
uint64_t addr;
int its_fd, ret;
- v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
- its_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_ITS, false);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS, vcpus);
+ its_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_ITS);
addr = 0x401000;
- ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == EINVAL,
"ITS region with misaligned address");
addr = max_phys_size;
- ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"register ITS region with base address beyond IPA range");
addr = max_phys_size - 0x10000;
- ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == E2BIG,
"Half of ITS region is beyond IPA range");
	/* This one succeeds in setting the ITS base */
addr = 0x400000;
- kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr);
addr = 0x300000;
- ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ ret = __kvm_device_attr_set(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr);
TEST_ASSERT(ret && errno == EEXIST, "ITS base set again");
close(its_fd);
@@ -643,34 +637,33 @@ static void test_v3_its_region(void)
*/
int test_kvm_device(uint32_t gic_dev_type)
{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
struct vm_gic v;
- int ret, fd;
uint32_t other;
+ int ret;
- v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL);
+ v.vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
	/* try to create a non-existent KVM device */
- ret = _kvm_create_device(v.vm, 0, true, &fd);
+ ret = __kvm_test_create_device(v.vm, 0);
TEST_ASSERT(ret && errno == ENODEV, "unsupported device");
/* trial mode */
- ret = _kvm_create_device(v.vm, gic_dev_type, true, &fd);
+ ret = __kvm_test_create_device(v.vm, gic_dev_type);
if (ret)
return ret;
- v.gic_fd = kvm_create_device(v.vm, gic_dev_type, false);
-
- ret = _kvm_create_device(v.vm, gic_dev_type, false, &fd);
- TEST_ASSERT(ret && errno == EEXIST, "create GIC device twice");
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
- kvm_create_device(v.vm, gic_dev_type, true);
+ ret = __kvm_create_device(v.vm, gic_dev_type);
+ TEST_ASSERT(ret < 0 && errno == EEXIST, "create GIC device twice");
/* try to create the other gic_dev_type */
other = VGIC_DEV_IS_V2(gic_dev_type) ? KVM_DEV_TYPE_ARM_VGIC_V3
: KVM_DEV_TYPE_ARM_VGIC_V2;
- if (!_kvm_create_device(v.vm, other, true, &fd)) {
- ret = _kvm_create_device(v.vm, other, false, &fd);
- TEST_ASSERT(ret && errno == EINVAL,
+ if (!__kvm_test_create_device(v.vm, other)) {
+ ret = __kvm_test_create_device(v.vm, other);
+ TEST_ASSERT(ret && (errno == EINVAL || errno == EEXIST),
"create GIC device while other version exists");
}
@@ -698,6 +691,7 @@ int main(int ac, char **av)
{
int ret;
int pa_bits;
+ int cnt_impl = 0;
pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits;
max_phys_size = 1ULL << pa_bits;
@@ -706,17 +700,19 @@ int main(int ac, char **av)
if (!ret) {
pr_info("Running GIC_v3 tests.\n");
run_tests(KVM_DEV_TYPE_ARM_VGIC_V3);
- return 0;
+ cnt_impl++;
}
ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2);
if (!ret) {
pr_info("Running GIC_v2 tests.\n");
run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
- return 0;
+ cnt_impl++;
}
- print_skip("No GICv2 nor GICv3 support");
- exit(KSFT_SKIP);
+ if (!cnt_impl) {
+		print_skip("Neither GICv2 nor GICv3 is supported");
+ exit(KSFT_SKIP);
+ }
return 0;
}
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
index 554ca649d470..17417220a083 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
@@ -22,7 +22,6 @@
#define GICD_BASE_GPA 0x08000000ULL
#define GICR_BASE_GPA 0x080A0000ULL
-#define VCPU_ID 0
/*
* Stores the user specified args; it's passed to the guest and to every test
@@ -589,7 +588,8 @@ static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
}
static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
- uint32_t vcpu, bool expect_failure)
+ struct kvm_vcpu *vcpu,
+ bool expect_failure)
{
/*
* Ignore this when expecting failure as invalid intids will lead to
@@ -630,8 +630,7 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
fd[f] = eventfd(0, 0);
- TEST_ASSERT(fd[f] != -1,
- "eventfd failed, errno: %i\n", errno);
+ TEST_ASSERT(fd[f] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd[f]));
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
@@ -647,7 +646,7 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
val = 1;
ret = write(fd[f], &val, sizeof(uint64_t));
TEST_ASSERT(ret == sizeof(uint64_t),
- "Write to KVM_IRQFD failed with ret: %d\n", ret);
+ __KVM_SYSCALL_ERROR("write()", ret));
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
@@ -660,15 +659,16 @@ static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
(tmp) < (uint64_t)(first) + (uint64_t)(num); \
(tmp)++, (i)++)
-static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
- struct kvm_inject_args *inject_args,
- struct test_args *test_args)
+static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
+ struct kvm_inject_args *inject_args,
+ struct test_args *test_args)
{
kvm_inject_cmd cmd = inject_args->cmd;
uint32_t intid = inject_args->first_intid;
uint32_t num = inject_args->num;
int level = inject_args->level;
bool expect_failure = inject_args->expect_failure;
+ struct kvm_vm *vm = vcpu->vm;
uint64_t tmp;
uint32_t i;
@@ -706,12 +706,12 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
break;
case KVM_WRITE_ISPENDR:
for (i = intid; i < intid + num; i++)
- kvm_irq_write_ispendr_check(gic_fd, i,
- VCPU_ID, expect_failure);
+ kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
+ expect_failure);
break;
case KVM_WRITE_ISACTIVER:
for (i = intid; i < intid + num; i++)
- kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
+ kvm_irq_write_isactiver(gic_fd, i, vcpu);
break;
default:
break;
@@ -740,6 +740,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
struct ucall uc;
int gic_fd;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_inject_args inject_args;
vm_vaddr_t args_gva;
@@ -754,39 +755,34 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
print_args(&args);
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
/* Setup the guest args page (so it gets the args). */
args_gva = vm_vaddr_alloc_page(vm);
memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
- vcpu_args_set(vm, 0, 1, args_gva);
+ vcpu_args_set(vcpu, 1, args_gva);
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
- if (gic_fd < 0) {
- print_skip("Failed to create vgic-v3, skipping");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
while (1) {
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
kvm_inject_get_call(vm, &uc, &inject_args);
- run_guest_cmd(vm, gic_fd, &inject_args, &args);
+ run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
break;
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
- (const char *)uc.args[0],
- __FILE__, uc.args[1], uc.args[2], uc.args[3]);
+ REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
break;
case UCALL_DONE:
goto done;
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index d8909032317a..1c2749b1481a 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -74,7 +74,7 @@ struct test_params {
uint64_t vcpu_memory_bytes;
/* The number of vCPUs to create in the VM. */
- int vcpus;
+ int nr_vcpus;
};
static uint64_t pread_uint64(int fd, const char *filename, uint64_t index)
@@ -104,10 +104,7 @@ static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva)
return 0;
pfn = entry & PAGEMAP_PFN_MASK;
- if (!pfn) {
- print_skip("Looking up PFNs requires CAP_SYS_ADMIN");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");
return pfn;
}
@@ -127,10 +124,12 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
"Set page_idle bits for PFN 0x%" PRIx64, pfn);
}
-static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
+static void mark_vcpu_memory_idle(struct kvm_vm *vm,
+ struct perf_test_vcpu_args *vcpu_args)
{
- uint64_t base_gva = perf_test_args.vcpu_args[vcpu_id].gva;
- uint64_t pages = perf_test_args.vcpu_args[vcpu_id].pages;
+ int vcpu_idx = vcpu_args->vcpu_idx;
+ uint64_t base_gva = vcpu_args->gva;
+ uint64_t pages = vcpu_args->pages;
uint64_t page;
uint64_t still_idle = 0;
uint64_t no_pfn = 0;
@@ -138,7 +137,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
int pagemap_fd;
/* If vCPUs are using an overlapping region, let vCPU 0 mark it idle. */
- if (overlap_memory_access && vcpu_id)
+ if (overlap_memory_access && vcpu_idx)
return;
page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
@@ -170,7 +169,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
*/
TEST_ASSERT(no_pfn < pages / 100,
"vCPU %d: No PFN for %" PRIu64 " out of %" PRIu64 " pages.",
- vcpu_id, no_pfn, pages);
+ vcpu_idx, no_pfn, pages);
/*
* Test that at least 90% of memory has been marked idle (the rest might
@@ -183,17 +182,16 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm, int vcpu_id)
TEST_ASSERT(still_idle < pages / 10,
"vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
PRIu64 ").\n",
- vcpu_id, still_idle, pages);
+ vcpu_idx, still_idle, pages);
close(page_idle_fd);
close(pagemap_fd);
}
-static void assert_ucall(struct kvm_vm *vm, uint32_t vcpu_id,
- uint64_t expected_ucall)
+static void assert_ucall(struct kvm_vcpu *vcpu, uint64_t expected_ucall)
{
struct ucall uc;
- uint64_t actual_ucall = get_ucall(vm, vcpu_id, &uc);
+ uint64_t actual_ucall = get_ucall(vcpu, &uc);
TEST_ASSERT(expected_ucall == actual_ucall,
"Guest exited unexpectedly (expected ucall %" PRIu64
@@ -217,28 +215,29 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
{
+ struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_vm *vm = perf_test_args.vm;
- int vcpu_id = vcpu_args->vcpu_id;
+ int vcpu_idx = vcpu_args->vcpu_idx;
int current_iteration = 0;
while (spin_wait_for_next_iteration(&current_iteration)) {
switch (READ_ONCE(iteration_work)) {
case ITERATION_ACCESS_MEMORY:
- vcpu_run(vm, vcpu_id);
- assert_ucall(vm, vcpu_id, UCALL_SYNC);
+ vcpu_run(vcpu);
+ assert_ucall(vcpu, UCALL_SYNC);
break;
case ITERATION_MARK_IDLE:
- mark_vcpu_memory_idle(vm, vcpu_id);
+ mark_vcpu_memory_idle(vm, vcpu_args);
break;
};
- vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+ vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
}
}
-static void spin_wait_for_vcpu(int vcpu_id, int target_iteration)
+static void spin_wait_for_vcpu(int vcpu_idx, int target_iteration)
{
- while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+ while (READ_ONCE(vcpu_last_completed_iteration[vcpu_idx]) !=
target_iteration) {
continue;
}
@@ -250,12 +249,11 @@ enum access_type {
ACCESS_WRITE,
};
-static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
+static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *description)
{
struct timespec ts_start;
struct timespec ts_elapsed;
- int next_iteration;
- int vcpu_id;
+ int next_iteration, i;
/* Kick off the vCPUs by incrementing iteration. */
next_iteration = ++iteration;
@@ -263,23 +261,23 @@ static void run_iteration(struct kvm_vm *vm, int vcpus, const char *description)
clock_gettime(CLOCK_MONOTONIC, &ts_start);
/* Wait for all vCPUs to finish the iteration. */
- for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
- spin_wait_for_vcpu(vcpu_id, next_iteration);
+ for (i = 0; i < nr_vcpus; i++)
+ spin_wait_for_vcpu(i, next_iteration);
ts_elapsed = timespec_elapsed(ts_start);
pr_info("%-30s: %ld.%09lds\n",
description, ts_elapsed.tv_sec, ts_elapsed.tv_nsec);
}
-static void access_memory(struct kvm_vm *vm, int vcpus, enum access_type access,
- const char *description)
+static void access_memory(struct kvm_vm *vm, int nr_vcpus,
+ enum access_type access, const char *description)
{
perf_test_set_wr_fract(vm, (access == ACCESS_READ) ? INT_MAX : 1);
iteration_work = ITERATION_ACCESS_MEMORY;
- run_iteration(vm, vcpus, description);
+ run_iteration(vm, nr_vcpus, description);
}
-static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
+static void mark_memory_idle(struct kvm_vm *vm, int nr_vcpus)
{
/*
* Even though this parallelizes the work across vCPUs, this is still a
@@ -289,37 +287,37 @@ static void mark_memory_idle(struct kvm_vm *vm, int vcpus)
*/
pr_debug("Marking VM memory idle (slow)...\n");
iteration_work = ITERATION_MARK_IDLE;
- run_iteration(vm, vcpus, "Mark memory idle");
+ run_iteration(vm, nr_vcpus, "Mark memory idle");
}
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *params = arg;
struct kvm_vm *vm;
- int vcpus = params->vcpus;
+ int nr_vcpus = params->nr_vcpus;
- vm = perf_test_create_vm(mode, vcpus, params->vcpu_memory_bytes, 1,
+ vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
params->backing_src, !overlap_memory_access);
- perf_test_start_vcpu_threads(vcpus, vcpu_thread_main);
+ perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
pr_info("\n");
- access_memory(vm, vcpus, ACCESS_WRITE, "Populating memory");
+ access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
/* As a control, read and write to the populated memory first. */
- access_memory(vm, vcpus, ACCESS_WRITE, "Writing to populated memory");
- access_memory(vm, vcpus, ACCESS_READ, "Reading from populated memory");
+ access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to populated memory");
+ access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from populated memory");
/* Repeat on memory that has been marked as idle. */
- mark_memory_idle(vm, vcpus);
- access_memory(vm, vcpus, ACCESS_WRITE, "Writing to idle memory");
- mark_memory_idle(vm, vcpus);
- access_memory(vm, vcpus, ACCESS_READ, "Reading from idle memory");
+ mark_memory_idle(vm, nr_vcpus);
+ access_memory(vm, nr_vcpus, ACCESS_WRITE, "Writing to idle memory");
+ mark_memory_idle(vm, nr_vcpus);
+ access_memory(vm, nr_vcpus, ACCESS_READ, "Reading from idle memory");
/* Set done to signal the vCPU threads to exit */
done = true;
- perf_test_join_vcpu_threads(vcpus);
+ perf_test_join_vcpu_threads(nr_vcpus);
perf_test_destroy_vm(vm);
}
@@ -347,7 +345,7 @@ int main(int argc, char *argv[])
struct test_params params = {
.backing_src = DEFAULT_VM_MEM_SRC,
.vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE,
- .vcpus = 1,
+ .nr_vcpus = 1,
};
int page_idle_fd;
int opt;
@@ -363,7 +361,7 @@ int main(int argc, char *argv[])
params.vcpu_memory_bytes = parse_size(optarg);
break;
case 'v':
- params.vcpus = atoi(optarg);
+ params.nr_vcpus = atoi(optarg);
break;
case 'o':
overlap_memory_access = true;
@@ -379,10 +377,8 @@ int main(int argc, char *argv[])
}
page_idle_fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
- if (page_idle_fd < 0) {
- print_skip("CONFIG_IDLE_PAGE_TRACKING is not enabled");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(page_idle_fd >= 0,
+ "CONFIG_IDLE_PAGE_TRACKING is not enabled");
close(page_idle_fd);
for_each_guest_mode(run_test, &params);
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 6a719d065599..779ae54f89c4 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -44,28 +44,26 @@ static char *guest_data_prototype;
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
- int ret;
- int vcpu_id = vcpu_args->vcpu_id;
- struct kvm_vm *vm = perf_test_args.vm;
- struct kvm_run *run;
+ struct kvm_vcpu *vcpu = vcpu_args->vcpu;
+ int vcpu_idx = vcpu_args->vcpu_idx;
+ struct kvm_run *run = vcpu->run;
struct timespec start;
struct timespec ts_diff;
-
- run = vcpu_state(vm, vcpu_id);
+ int ret;
clock_gettime(CLOCK_MONOTONIC, &start);
/* Let the guest access its memory */
- ret = _vcpu_run(vm, vcpu_id);
+ ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- if (get_ucall(vm, vcpu_id, NULL) != UCALL_SYNC) {
+ if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
TEST_ASSERT(false,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
}
ts_diff = timespec_elapsed(start);
- PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_id,
+ PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_idx,
ts_diff.tv_sec, ts_diff.tv_nsec);
}
@@ -223,6 +221,7 @@ static void setup_demand_paging(struct kvm_vm *vm,
struct uffdio_api uffdio_api;
struct uffdio_register uffdio_register;
uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY;
+ int ret;
PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
is_minor ? "MINOR" : "MISSING",
@@ -242,19 +241,18 @@ static void setup_demand_paging(struct kvm_vm *vm,
}
uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
- TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno);
+ TEST_ASSERT(uffd >= 0, __KVM_SYSCALL_ERROR("userfaultfd()", uffd));
uffdio_api.api = UFFD_API;
uffdio_api.features = 0;
- TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1,
- "ioctl UFFDIO_API failed: %" PRIu64,
- (uint64_t)uffdio_api.api);
+ ret = ioctl(uffd, UFFDIO_API, &uffdio_api);
+ TEST_ASSERT(ret != -1, __KVM_SYSCALL_ERROR("UFFDIO_API", ret));
uffdio_register.range.start = (uint64_t)hva;
uffdio_register.range.len = len;
uffdio_register.mode = uffd_mode;
- TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1,
- "ioctl UFFDIO_REGISTER failed");
+ ret = ioctl(uffd, UFFDIO_REGISTER, &uffdio_register);
+ TEST_ASSERT(ret != -1, __KVM_SYSCALL_ERROR("UFFDIO_REGISTER", ret));
TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) ==
expected_ioctls, "missing userfaultfd ioctls");
@@ -285,8 +283,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct timespec ts_diff;
int *pipefds = NULL;
struct kvm_vm *vm;
- int vcpu_id;
- int r;
+ int r, i;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
p->src_type, p->partition_vcpu_memory_access);
@@ -309,12 +306,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pipefds = malloc(sizeof(int) * nr_vcpus * 2);
TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ for (i = 0; i < nr_vcpus; i++) {
struct perf_test_vcpu_args *vcpu_args;
void *vcpu_hva;
void *vcpu_alias;
- vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+ vcpu_args = &perf_test_args.vcpu_args[i];
/* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
@@ -324,13 +321,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* Set up user fault fd to handle demand paging
* requests.
*/
- r = pipe2(&pipefds[vcpu_id * 2],
+ r = pipe2(&pipefds[i * 2],
O_CLOEXEC | O_NONBLOCK);
TEST_ASSERT(!r, "Failed to set up pipefd");
- setup_demand_paging(vm, &uffd_handler_threads[vcpu_id],
- pipefds[vcpu_id * 2], p->uffd_mode,
- p->uffd_delay, &uffd_args[vcpu_id],
+ setup_demand_paging(vm, &uffd_handler_threads[i],
+ pipefds[i * 2], p->uffd_mode,
+ p->uffd_delay, &uffd_args[i],
vcpu_hva, vcpu_alias,
vcpu_args->pages * perf_test_args.guest_page_size);
}
@@ -350,11 +347,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
char c;
/* Tell the user fault fd handler threads to quit */
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
- r = write(pipefds[vcpu_id * 2 + 1], &c, 1);
+ for (i = 0; i < nr_vcpus; i++) {
+ r = write(pipefds[i * 2 + 1], &c, 1);
TEST_ASSERT(r == 1, "Unable to write to pipefd");
- pthread_join(uffd_handler_threads[vcpu_id], NULL);
+ pthread_join(uffd_handler_threads[i], NULL);
}
}
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index 7b47ae4f952e..f99e39a672d3 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -59,6 +59,7 @@ static void arch_cleanup_vm(struct kvm_vm *vm)
static int nr_vcpus = 1;
static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
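+/* When set, vCPUs keep running while dirty logging is being disabled. */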
+static bool run_vcpus_while_disabling_dirty_logging;
/* Host variables */
static u64 dirty_log_manual_caps;
@@ -68,54 +69,59 @@ static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
- int ret;
- struct kvm_vm *vm = perf_test_args.vm;
+ struct kvm_vcpu *vcpu = vcpu_args->vcpu;
+ int vcpu_idx = vcpu_args->vcpu_idx;
uint64_t pages_count = 0;
struct kvm_run *run;
struct timespec start;
struct timespec ts_diff;
struct timespec total = (struct timespec){0};
struct timespec avg;
- int vcpu_id = vcpu_args->vcpu_id;
+ int ret;
- run = vcpu_state(vm, vcpu_id);
+ run = vcpu->run;
while (!READ_ONCE(host_quit)) {
int current_iteration = READ_ONCE(iteration);
clock_gettime(CLOCK_MONOTONIC, &start);
- ret = _vcpu_run(vm, vcpu_id);
+ ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
- pr_debug("Got sync event from vCPU %d\n", vcpu_id);
- vcpu_last_completed_iteration[vcpu_id] = current_iteration;
+ pr_debug("Got sync event from vCPU %d\n", vcpu_idx);
+ vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
pr_debug("vCPU %d updated last completed iteration to %d\n",
- vcpu_id, vcpu_last_completed_iteration[vcpu_id]);
+ vcpu_idx, vcpu_last_completed_iteration[vcpu_idx]);
if (current_iteration) {
pages_count += vcpu_args->pages;
total = timespec_add(total, ts_diff);
pr_debug("vCPU %d iteration %d dirty memory time: %ld.%.9lds\n",
- vcpu_id, current_iteration, ts_diff.tv_sec,
+ vcpu_idx, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
} else {
pr_debug("vCPU %d iteration %d populate memory time: %ld.%.9lds\n",
- vcpu_id, current_iteration, ts_diff.tv_sec,
+ vcpu_idx, current_iteration, ts_diff.tv_sec,
ts_diff.tv_nsec);
}
+ /*
+ * Keep running the guest while dirty logging is being disabled
+ * (iteration is negative) so that vCPUs are accessing memory
+ * for the entire duration of zapping collapsible SPTEs.
+ */
while (current_iteration == READ_ONCE(iteration) &&
- !READ_ONCE(host_quit)) {}
+ READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
}
- avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_id]);
+ avg = timespec_div(total, vcpu_last_completed_iteration[vcpu_idx]);
pr_debug("\nvCPU %d dirtied 0x%lx pages over %d iterations in %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
- vcpu_id, pages_count, vcpu_last_completed_iteration[vcpu_id],
+ vcpu_idx, pages_count, vcpu_last_completed_iteration[vcpu_idx],
total.tv_sec, total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
@@ -207,14 +213,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
uint64_t guest_num_pages;
uint64_t host_num_pages;
uint64_t pages_per_slot;
- int vcpu_id;
struct timespec start;
struct timespec ts_diff;
struct timespec get_dirty_log_total = (struct timespec){0};
struct timespec vcpu_dirty_total = (struct timespec){0};
struct timespec avg;
- struct kvm_enable_cap cap = {};
struct timespec clear_dirty_log_total = (struct timespec){0};
+ int i;
vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
p->slots, p->backing_src,
@@ -222,18 +227,16 @@ static void run_test(enum vm_guest_mode mode, void *arg)
perf_test_set_wr_fract(vm, p->wr_fract);
- guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
+ guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
pages_per_slot = host_num_pages / p->slots;
bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
- if (dirty_log_manual_caps) {
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
- cap.args[0] = dirty_log_manual_caps;
- vm_enable_cap(vm, &cap);
- }
+ if (dirty_log_manual_caps)
+ vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
+ dirty_log_manual_caps);
arch_setup_vm(vm, nr_vcpus);
@@ -242,15 +245,15 @@ static void run_test(enum vm_guest_mode mode, void *arg)
host_quit = false;
clock_gettime(CLOCK_MONOTONIC, &start);
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
- vcpu_last_completed_iteration[vcpu_id] = -1;
+ for (i = 0; i < nr_vcpus; i++)
+ vcpu_last_completed_iteration[i] = -1;
perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
/* Allow the vCPUs to populate memory */
pr_debug("Starting iteration %d - Populating\n", iteration);
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
- while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id]) !=
+ for (i = 0; i < nr_vcpus; i++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
iteration)
;
}
@@ -275,8 +278,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration++;
pr_debug("Starting iteration %d\n", iteration);
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
- while (READ_ONCE(vcpu_last_completed_iteration[vcpu_id])
+ for (i = 0; i < nr_vcpus; i++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[i])
!= iteration)
;
}
@@ -305,6 +308,14 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}
}
+ /*
+ * Run vCPUs while dirty logging is being disabled to stress disabling
+ * in terms of both performance and correctness. This is opt-in via the
+ * command line, as it significantly increases the time needed to disable
+ * dirty logging.
+ */
+ if (run_vcpus_while_disabling_dirty_logging)
+ WRITE_ONCE(iteration, -1);
+
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
disable_dirty_logging(vm, p->slots);
@@ -312,7 +323,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
- /* Tell the vcpu thread to quit */
+ /*
+ * Tell the vCPU threads to quit. No need to manually check that vCPUs
+ * have stopped running after disabling dirty logging, the join will
+ * wait for them to exit.
+ */
host_quit = true;
perf_test_join_vcpu_threads(nr_vcpus);
@@ -336,8 +351,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
static void help(char *name)
{
puts("");
- printf("usage: %s [-h] [-i iterations] [-p offset] [-g]"
- "[-m mode] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
+ printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
+ "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
"[-x memslots]\n", name);
puts("");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
@@ -351,6 +366,10 @@ static void help(char *name)
printf(" -p: specify guest physical test memory offset\n"
" Warning: a low offset can conflict with the loaded test code.\n");
guest_modes_help();
+ printf(" -n: Run the vCPUs in nested mode (L2)\n");
+ printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
+ " can significantly increase runtime, especially if there\n"
+ " isn't a dedicated pCPU for the main thread.\n");
printf(" -b: specify the size of the memory region which should be\n"
" dirtied by each vCPU. e.g. 10M or 3G.\n"
" (default: 1G)\n");
@@ -387,8 +406,11 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "ghi:p:m:b:f:v:os:x:")) != -1) {
+ while ((opt = getopt(argc, argv, "eghi:p:m:nb:f:v:os:x:")) != -1) {
switch (opt) {
+ case 'e':
+ /* 'e' is for evil. */
+ run_vcpus_while_disabling_dirty_logging = true;
+ break;
case 'g':
dirty_log_manual_caps = 0;
break;
@@ -401,6 +423,9 @@ int main(int argc, char *argv[])
case 'm':
guest_modes_cmdline(optarg);
break;
+ case 'n':
+ perf_test_args.nested = true;
+ break;
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
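
For readers following the -e plumbing above: the option works by driving the shared iteration counter negative, which releases vCPUs from their inter-iteration spin loop while disable_dirty_logging() runs. A minimal standalone sketch of that handshake (names follow the hunks above; synchronization details are simplified):

/* Sketch of the -e handshake between the main thread and a vCPU worker. */
static int iteration;		/* shared; -1 means "dirty logging teardown" */
static bool host_quit;

static void wait_for_next_iteration(int current_iteration)
{
	/*
	 * While iteration is non-negative, a vCPU parks here between
	 * iterations. Setting iteration to -1 fails the ">= 0" check,
	 * so the worker drops back into KVM_RUN and keeps touching guest
	 * memory for the entire time dirty logging is being disabled.
	 */
	while (current_iteration == READ_ONCE(iteration) &&
	       READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit))
		;
}
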
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 3fcd89e195c7..9c883c94d478 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -23,8 +23,6 @@
#include "guest_modes.h"
#include "processor.h"
-#define VCPU_ID 1
-
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
@@ -212,34 +210,31 @@ static void sem_wait_until(sem_t *sem)
static bool clear_log_supported(void)
{
- return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+ return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}
static void clear_log_create_vm_done(struct kvm_vm *vm)
{
- struct kvm_enable_cap cap = {};
u64 manual_caps;
manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
KVM_DIRTY_LOG_INITIALLY_SET);
- cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
- cap.args[0] = manual_caps;
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, manual_caps);
}
-static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages)
{
- kvm_vm_get_dirty_log(vm, slot, bitmap);
+ kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}
-static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages)
{
- kvm_vm_get_dirty_log(vm, slot, bitmap);
- kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
+ kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
+ kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages);
}
/* Should only be called after a GUEST_SYNC */
@@ -253,14 +248,14 @@ static void vcpu_handle_sync_stop(void)
}
}
-static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
"vcpu run failed: errno=%d", err);
- TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
exit_reason_str(run->exit_reason));
@@ -269,7 +264,7 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
static bool dirty_ring_supported(void)
{
- return kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
+ return kvm_has_cap(KVM_CAP_DIRTY_LOG_RING);
}
static void dirty_ring_create_vm_done(struct kvm_vm *vm)
@@ -331,7 +326,7 @@ static void dirty_ring_continue_vcpu(void)
sem_post(&sem_vcpu_cont);
}
-static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages)
{
/* We only have one vcpu */
@@ -351,10 +346,10 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
}
/* Only have one vcpu */
- count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
+ count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
slot, bitmap, num_pages, &fetch_index);
- cleared = kvm_vm_reset_dirty_ring(vm);
+ cleared = kvm_vm_reset_dirty_ring(vcpu->vm);
/* Cleared pages should be the same as collected */
TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
@@ -369,12 +364,12 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
pr_info("Iteration %ld collected %u pages\n", iteration, count);
}
-static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
/* A ucall-sync or ring-full event is allowed */
- if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
+ if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
/* We should allow this to continue */
;
} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
@@ -408,10 +403,10 @@ struct log_mode {
/* Hook when the vm creation is done (before vcpu creation) */
void (*create_vm_done)(struct kvm_vm *vm);
/* Hook to collect the dirty pages into the bitmap provided */
- void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
+ void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages);
/* Hook to call after each vcpu run */
- void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
+ void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
{
@@ -473,22 +468,22 @@ static void log_mode_create_vm_done(struct kvm_vm *vm)
mode->create_vm_done(vm);
}
-static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
+static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages)
{
struct log_mode *mode = &log_modes[host_log_mode];
TEST_ASSERT(mode->collect_dirty_pages != NULL,
"collect_dirty_pages() is required for any log mode!");
- mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
+ mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages);
}
-static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
+static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->after_vcpu_run)
- mode->after_vcpu_run(vm, ret, err);
+ mode->after_vcpu_run(vcpu, ret, err);
}
static void log_mode_before_vcpu_join(void)
@@ -509,16 +504,15 @@ static void generate_random_array(uint64_t *guest_array, uint64_t size)
static void *vcpu_worker(void *data)
{
- int ret, vcpu_fd;
- struct kvm_vm *vm = data;
+ int ret;
+ struct kvm_vcpu *vcpu = data;
+ struct kvm_vm *vm = vcpu->vm;
uint64_t *guest_array;
uint64_t pages_count = 0;
struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
+ sizeof(sigset_t));
sigset_t *sigset = (sigset_t *) &sigmask->sigset;
- vcpu_fd = vcpu_get_fd(vm, VCPU_ID);
-
/*
* SIG_IPI is unblocked atomically while in KVM_RUN. It causes the
* ioctl to return with -EINTR, but it is still pending and we need
@@ -527,7 +521,7 @@ static void *vcpu_worker(void *data)
sigmask->len = 8;
pthread_sigmask(0, NULL, sigset);
sigdelset(sigset, SIG_IPI);
- vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);
+ vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);
sigemptyset(sigset);
sigaddset(sigset, SIG_IPI);
@@ -539,13 +533,13 @@ static void *vcpu_worker(void *data)
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
pages_count += TEST_PAGES_PER_LOOP;
/* Let the guest dirty the random pages */
- ret = ioctl(vcpu_fd, KVM_RUN, NULL);
+ ret = __vcpu_run(vcpu);
if (ret == -1 && errno == EINTR) {
int sig = -1;
sigwait(sigset, &sig);
assert(sig == SIG_IPI);
}
- log_mode_after_vcpu_run(vm, ret, errno);
+ log_mode_after_vcpu_run(vcpu, ret, errno);
}
pr_info("Dirtied %"PRIu64" pages\n", pages_count);
@@ -671,21 +665,17 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
}
}
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
uint64_t extra_mem_pages, void *guest_code)
{
struct kvm_vm *vm;
- uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
- vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name);
-#ifdef __x86_64__
- vm_create_irqchip(vm);
-#endif
+ vm = __vm_create(mode, 1, extra_mem_pages);
+
log_mode_create_vm_done(vm);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
+ *vcpu = vm_vcpu_add(vm, 0, guest_code);
return vm;
}
@@ -701,6 +691,7 @@ struct test_params {
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
unsigned long *bmap;
@@ -718,25 +709,23 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* (e.g., 64K page size guest will need even less memory for
* page tables).
*/
- vm = create_vm(mode, VCPU_ID,
- 2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
- guest_code);
+ vm = create_vm(mode, &vcpu,
+ 2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K), guest_code);
- guest_page_size = vm_get_page_size(vm);
+ guest_page_size = vm->page_size;
/*
* A little more than 1G of guest page sized pages. Cover the
* case where the size is not aligned to 64 pages.
*/
- guest_num_pages = (1ul << (DIRTY_MEM_BITS -
- vm_get_page_shift(vm))) + 3;
+ guest_num_pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_page_size = getpagesize();
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
if (!p->phys_offset) {
- guest_test_phys_mem = (vm_get_max_gfn(vm) -
- guest_num_pages) * guest_page_size;
+ guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
+ guest_page_size;
guest_test_phys_mem = align_down(guest_test_phys_mem, host_page_size);
} else {
guest_test_phys_mem = p->phys_offset;
@@ -781,12 +770,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
host_clear_count = 0;
host_track_next_count = 0;
- pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
+ pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
while (iteration < p->iterations) {
/* Give the vcpu thread some time to dirty some pages */
usleep(p->interval * 1000);
- log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
+ log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
bmap, host_num_pages);
/*
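
The conversion above changes the log_mode hooks to take a struct kvm_vcpu * instead of a (vm, VCPU_ID) pair. A hypothetical extra mode, sketched only to show the post-change hook signatures (the real dirty-log, clear-log and dirty-ring entries live in the unshown context of log_modes[]):

/* Hypothetical log mode illustrating the new hook signatures. */
static bool my_mode_supported(void)
{
	return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}

static void my_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
					void *bitmap, uint32_t num_pages)
{
	/* The VM is now reached through the vCPU object. */
	kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap);
}

static void my_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
{
	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
		    "vcpu run failed: errno=%d", err);
}
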
diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c
index b21c69a56daa..f5d59b9934f1 100644
--- a/tools/testing/selftests/kvm/hardware_disable_test.c
+++ b/tools/testing/selftests/kvm/hardware_disable_test.c
@@ -27,12 +27,6 @@
sem_t *sem;
-/* Arguments for the pthreads */
-struct payload {
- struct kvm_vm *vm;
- uint32_t index;
-};
-
static void guest_code(void)
{
for (;;)
@@ -42,14 +36,14 @@ static void guest_code(void)
static void *run_vcpu(void *arg)
{
- struct payload *payload = (struct payload *)arg;
- struct kvm_run *state = vcpu_state(payload->vm, payload->index);
+ struct kvm_vcpu *vcpu = arg;
+ struct kvm_run *run = vcpu->run;
- vcpu_run(payload->vm, payload->index);
+ vcpu_run(vcpu);
TEST_ASSERT(false, "%s: exited with reason %d: %s\n",
- __func__, state->exit_reason,
- exit_reason_str(state->exit_reason));
+ __func__, run->exit_reason,
+ exit_reason_str(run->exit_reason));
pthread_exit(NULL);
}
@@ -92,11 +86,11 @@ static inline void check_join(pthread_t thread, void **retval)
static void run_test(uint32_t run)
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
cpu_set_t cpu_set;
pthread_t threads[VCPU_NUM];
pthread_t throw_away;
- struct payload payloads[VCPU_NUM];
void *b;
uint32_t i, j;
@@ -104,18 +98,13 @@ static void run_test(uint32_t run)
for (i = 0; i < VCPU_NUM; i++)
CPU_SET(i, &cpu_set);
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
- kvm_vm_elf_load(vm, program_invocation_name);
- vm_create_irqchip(vm);
+ vm = vm_create(VCPU_NUM);
pr_debug("%s: [%d] start vcpus\n", __func__, run);
for (i = 0; i < VCPU_NUM; ++i) {
- vm_vcpu_add_default(vm, i, guest_code);
- payloads[i].vm = vm;
- payloads[i].index = i;
+ vcpu = vm_vcpu_add(vm, i, guest_code);
- check_create_thread(&threads[i], NULL, run_vcpu,
- (void *)&payloads[i]);
+ check_create_thread(&threads[i], NULL, run_vcpu, vcpu);
check_set_affinity(threads[i], &cpu_set);
for (j = 0; j < SLEEPING_THREAD_NUM; ++j) {
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
index 8f9f46979a00..a8124f9dd68a 100644
--- a/tools/testing/selftests/kvm/include/aarch64/processor.h
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -19,7 +19,7 @@
/*
* KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
* SYS_* register definitions in asm/sysreg.h to use in KVM
- * calls such as get_reg() and set_reg().
+ * calls such as vcpu_get_reg() and vcpu_set_reg().
*/
#define KVM_ARM64_SYS_REG(sys_reg_id) \
ARM64_SYS_REG(sys_reg_Op0(sys_reg_id), \
@@ -47,25 +47,9 @@
#define MPIDR_HWID_BITMASK (0xff00fffffful)
-static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr)
-{
- struct kvm_one_reg reg;
- reg.id = id;
- reg.addr = (uint64_t)addr;
- vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
-}
-
-static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val)
-{
- struct kvm_one_reg reg;
- reg.id = id;
- reg.addr = (uint64_t)&val;
- vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
-}
-
-void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
-void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_init *init, void *guest_code);
+void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ struct kvm_vcpu_init *init, void *guest_code);
struct ex_regs {
u64 regs[31];
@@ -117,7 +101,7 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
bool *ps4k, bool *ps16k, bool *ps64k);
void vm_init_descriptor_tables(struct kvm_vm *vm);
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
typedef void(*handler_fn)(struct ex_regs *);
void vm_install_exception_handler(struct kvm_vm *vm,
@@ -185,4 +169,28 @@ static inline void local_irq_disable(void)
asm volatile("msr daifset, #3" : : : "memory");
}
+/**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3 result values from registers 0 to 3
+ */
+struct arm_smccc_res {
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+};
+
+/**
+ * smccc_hvc - Invoke an SMCCC function using the hvc conduit
+ * @function_id: the SMCCC function to be called
+ * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
+ * @res: pointer to write the return values from registers x0-x3
+ */
+void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
+ uint64_t arg6, struct arm_smccc_res *res);
+
+uint32_t guest_get_vcpuid(void);
+
#endif /* SELFTEST_KVM_PROCESSOR_H */
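
A minimal guest-side sketch of the new smccc_hvc() helper; PSCI_0_2_FN_PSCI_VERSION is used as an illustrative function ID and is assumed to come from <linux/psci.h>, which other arm64 selftests already include:

#include <linux/psci.h>		/* PSCI_0_2_FN_PSCI_VERSION (illustrative) */

static void guest_query_psci_version(void)
{
	struct arm_smccc_res res;

	/* x1-x7 are unused by PSCI_VERSION, so pass zeros. */
	smccc_hvc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);

	/* a0 holds the result; -1 is the SMCCC "not supported" code. */
	GUEST_ASSERT(res.a0 != (unsigned long)-1);
}
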
diff --git a/tools/testing/selftests/kvm/include/aarch64/vgic.h b/tools/testing/selftests/kvm/include/aarch64/vgic.h
index 4442081221a0..0ac6f05c63f9 100644
--- a/tools/testing/selftests/kvm/include/aarch64/vgic.h
+++ b/tools/testing/selftests/kvm/include/aarch64/vgic.h
@@ -8,6 +8,8 @@
#include <linux/kvm.h>
+#include "kvm_util.h"
+
#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
(((uint64_t)(count) << 52) | \
((uint64_t)((base) >> 16) << 16) | \
@@ -26,8 +28,8 @@ void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
/* The vcpu arg only applies to private interrupts. */
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu);
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu);
+void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
+void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu);
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 92cef0ffb19e..24fde97f6121 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -9,9 +9,14 @@
#include "test_util.h"
-#include "asm/kvm.h"
+#include <linux/compiler.h>
+#include "linux/hashtable.h"
#include "linux/list.h"
-#include "linux/kvm.h"
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include "linux/rbtree.h"
+
+
#include <sys/ioctl.h>
#include "sparsebit.h"
@@ -21,20 +26,88 @@
#define NSEC_PER_SEC 1000000000L
-/*
- * Callers of kvm_util only have an incomplete/opaque description of the
- * structure kvm_util is using to maintain the state of a VM.
- */
-struct kvm_vm;
-
typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
+struct userspace_mem_region {
+ struct kvm_userspace_memory_region region;
+ struct sparsebit *unused_phy_pages;
+ int fd;
+ off_t offset;
+ void *host_mem;
+ void *host_alias;
+ void *mmap_start;
+ void *mmap_alias;
+ size_t mmap_size;
+ struct rb_node gpa_node;
+ struct rb_node hva_node;
+ struct hlist_node slot_node;
+};
+
+struct kvm_vcpu {
+ struct list_head list;
+ uint32_t id;
+ int fd;
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+#ifdef __x86_64__
+ struct kvm_cpuid2 *cpuid;
+#endif
+ struct kvm_dirty_gfn *dirty_gfns;
+ uint32_t fetch_index;
+ uint32_t dirty_gfns_count;
+};
+
+struct userspace_mem_regions {
+ struct rb_root gpa_tree;
+ struct rb_root hva_tree;
+ DECLARE_HASHTABLE(slot_hash, 9);
+};
+
+struct kvm_vm {
+ int mode;
+ unsigned long type;
+ int kvm_fd;
+ int fd;
+ unsigned int pgtable_levels;
+ unsigned int page_size;
+ unsigned int page_shift;
+ unsigned int pa_bits;
+ unsigned int va_bits;
+ uint64_t max_gfn;
+ struct list_head vcpus;
+ struct userspace_mem_regions regions;
+ struct sparsebit *vpages_valid;
+ struct sparsebit *vpages_mapped;
+ bool has_irqchip;
+ bool pgd_created;
+ vm_paddr_t pgd;
+ vm_vaddr_t gdt;
+ vm_vaddr_t tss;
+ vm_vaddr_t idt;
+ vm_vaddr_t handlers;
+ uint32_t dirty_ring_size;
+
+ /* Cache of information for binary stats interface */
+ int stats_fd;
+ struct kvm_stats_header stats_header;
+ struct kvm_stats_desc *stats_desc;
+};
+
+
+#define kvm_for_each_vcpu(vm, i, vcpu) \
+ for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
+ if (!((vcpu) = vm->vcpus[i])) \
+ continue; \
+ else
+
+struct userspace_mem_region *
+memslot2region(struct kvm_vm *vm, uint32_t memslot);
+
/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR 0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-#define DEFAULT_GUEST_PHY_PAGES 512
#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
#define DEFAULT_STACK_PGS 5
@@ -102,49 +175,194 @@ extern const struct vm_guest_mode_params vm_guest_mode_params[];
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
-int kvm_check_cap(long cap);
-int vm_check_cap(struct kvm_vm *vm, long cap);
-int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
-int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
- struct kvm_enable_cap *cap);
+unsigned int kvm_check_cap(long cap);
+
+static inline bool kvm_has_cap(long cap)
+{
+ return kvm_check_cap(cap);
+}
+
+#define __KVM_SYSCALL_ERROR(_name, _ret) \
+ "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)
+
+#define __KVM_IOCTL_ERROR(_name, _ret) __KVM_SYSCALL_ERROR(_name, _ret)
+#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)
+
+#define kvm_do_ioctl(fd, cmd, arg) \
+({ \
+ static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd), ""); \
+ ioctl(fd, cmd, arg); \
+})
+
+#define __kvm_ioctl(kvm_fd, cmd, arg) \
+ kvm_do_ioctl(kvm_fd, cmd, arg)
+
+
+#define _kvm_ioctl(kvm_fd, cmd, name, arg) \
+({ \
+ int ret = __kvm_ioctl(kvm_fd, cmd, arg); \
+ \
+ TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret)); \
+})
+
+#define kvm_ioctl(kvm_fd, cmd, arg) \
+ _kvm_ioctl(kvm_fd, cmd, #cmd, arg)
+
+static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }
+
+#define __vm_ioctl(vm, cmd, arg) \
+({ \
+ static_assert_is_vm(vm); \
+ kvm_do_ioctl((vm)->fd, cmd, arg); \
+})
+
+#define _vm_ioctl(vm, cmd, name, arg) \
+({ \
+ int ret = __vm_ioctl(vm, cmd, arg); \
+ \
+ TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret)); \
+})
+
+#define vm_ioctl(vm, cmd, arg) \
+ _vm_ioctl(vm, cmd, #cmd, arg)
+
+
+static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }
+
+#define __vcpu_ioctl(vcpu, cmd, arg) \
+({ \
+ static_assert_is_vcpu(vcpu); \
+ kvm_do_ioctl((vcpu)->fd, cmd, arg); \
+})
+
+#define _vcpu_ioctl(vcpu, cmd, name, arg) \
+({ \
+ int ret = __vcpu_ioctl(vcpu, cmd, arg); \
+ \
+ TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret)); \
+})
+
+#define vcpu_ioctl(vcpu, cmd, arg) \
+ _vcpu_ioctl(vcpu, cmd, #cmd, arg)
+
+/*
+ * Looks up and returns the value corresponding to the capability
+ * (KVM_CAP_*) given by cap.
+ */
+static inline int vm_check_cap(struct kvm_vm *vm, long cap)
+{
+ int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);
+
+ TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
+ return ret;
+}
+
+static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+{
+ struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
+
+ return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
+}
+static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
+{
+ struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
+
+ vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
+}
+
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
-void kvm_vm_restart(struct kvm_vm *vmp, int perm);
+void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
-void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
-void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
- uint64_t first_page, uint32_t num_pages);
-uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm);
-
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
size_t len);
-
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-/*
- * VM VCPU Dump
- *
- * Input Args:
- * stream - Output FILE stream
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps the current state of the VCPU specified by @vcpuid, within the VM
- * given by @vm, to the FILE stream given by @stream.
- */
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
- uint8_t indent);
+static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
+{
+ struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
+
+ vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
+}
+
+static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
+ uint64_t first_page, uint32_t num_pages)
+{
+ struct kvm_clear_dirty_log args = {
+ .dirty_bitmap = log,
+ .slot = slot,
+ .first_page = first_page,
+ .num_pages = num_pages
+ };
+
+ vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
+}
+
+static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
+{
+ return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
+}
+
+static inline int vm_get_stats_fd(struct kvm_vm *vm)
+{
+ int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
+
+ TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
+ return fd;
+}
+
+static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
+{
+ ssize_t ret;
+
+ ret = read(stats_fd, header, sizeof(*header));
+ TEST_ASSERT(ret == sizeof(*header), "Read stats header");
+}
+
+struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
+ struct kvm_stats_header *header);
+
+static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
+{
+ /*
+ * The base size of the descriptor is defined by KVM's ABI, but the
+ * size of the name field is variable, as far as KVM's ABI is
+ * concerned. For a given instance of KVM, the name field is the same
+ * size for all stats and is provided in the overall stats header.
+ */
+ return sizeof(struct kvm_stats_desc) + header->name_size;
+}
+
+static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
+ int index,
+ struct kvm_stats_header *header)
+{
+ /*
+ * Note, size_desc includes the size of the name field, which is
+ * variable. i.e. this is NOT equivalent to &stats_desc[i].
+ */
+ return (void *)stats + index * get_stats_descriptor_size(header);
+}
+
+void read_stat_data(int stats_fd, struct kvm_stats_header *header,
+ struct kvm_stats_desc *desc, uint64_t *data,
+ size_t max_elements);
+
+void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
+ size_t max_elements);
+
+static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
+{
+ uint64_t data;
+
+ __vm_get_stat(vm, stat_name, &data, 1);
+ return data;
+}
void vm_create_irqchip(struct kvm_vm *vm);
@@ -157,18 +375,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
uint64_t guest_paddr, uint32_t slot, uint64_t npages,
uint32_t flags);
-void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
- void *arg);
-int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
- void *arg);
-void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
-int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg);
-void kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
-int _kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
-void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid);
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
@@ -180,42 +390,219 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
-/*
- * Address Guest Virtual to Guest Physical
- *
- * Input Args:
- * vm - Virtual Machine
- * gva - VM virtual address
- *
- * Output Args: None
- *
- * Return:
- * Equivalent VM physical address
- *
- * Returns the VM physical address of the translated VM virtual
- * address given by @gva.
- */
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
-
-struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
-int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
-int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_guest_debug *debug);
-void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_mp_state *mp_state);
-struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
-void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
+void vcpu_run(struct kvm_vcpu *vcpu);
+int _vcpu_run(struct kvm_vcpu *vcpu);
+
+static inline int __vcpu_run(struct kvm_vcpu *vcpu)
+{
+ return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
+}
+
+void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);
+
+static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
+ uint64_t arg0)
+{
+ struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };
+
+ vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
+}
+
+static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *debug)
+{
+ vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
+}
+
+static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
+}
+static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
+}
+
+static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
+}
+
+static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
+}
+static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
+}
+static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
+}
+static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
+}
+static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
+}
+static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
+}
+
+static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+{
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
+
+ return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
+}
+static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+{
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+
+ return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+}
+static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
+{
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
+
+ vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
+}
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
+{
+ struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
+
+ vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+}
+
+#ifdef __KVM_HAVE_VCPU_EVENTS
+static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
+}
+static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
+}
+#endif
+#ifdef __x86_64__
+static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state *state)
+{
+ vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
+}
+static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state *state)
+{
+ return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
+}
+
+static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state *state)
+{
+ vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
+}
+#endif
+static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
+{
+ int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);
+
+ TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
+ return fd;
+}
+
+int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);
+
+static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
+{
+ int ret = __kvm_has_device_attr(dev_fd, group, attr);
+
+ TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
+}
+
+int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);
+
+static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
+ uint64_t attr, void *val)
+{
+ int ret = __kvm_device_attr_get(dev_fd, group, attr, val);
+
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
+}
+
+int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);
+
+static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
+ uint64_t attr, void *val)
+{
+ int ret = __kvm_device_attr_set(dev_fd, group, attr, val);
+
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
+}
+
+static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr)
+{
+ return __kvm_has_device_attr(vcpu->fd, group, attr);
+}
+
+static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr)
+{
+ kvm_has_device_attr(vcpu->fd, group, attr);
+}
+
+static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ return __kvm_device_attr_get(vcpu->fd, group, attr, val);
+}
+
+static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ kvm_device_attr_get(vcpu->fd, group, attr, val);
+}
+
+static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ return __kvm_device_attr_set(vcpu->fd, group, attr, val);
+}
+
+static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
+ uint64_t attr, void *val)
+{
+ kvm_device_attr_set(vcpu->fd, group, attr, val);
+}
+
+int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
+int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
+
+static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
+{
+ int fd = __kvm_create_device(vm, type);
+
+ TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
+ return fd;
+}
+
+void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);
/*
* VM VCPU Args Set
*
* Input Args:
* vm - Virtual Machine
- * vcpuid - VCPU ID
* num - number of arguments
* ... - arguments, each of type uint64_t
*
@@ -223,59 +610,16 @@ void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
*
* Return: None
*
- * Sets the first @num function input registers of the VCPU with @vcpuid,
- * per the C calling convention of the architecture, to the values given
- * as variable args. Each of the variable args is expected to be of type
- * uint64_t. The maximum @num can be is specific to the architecture.
+ * Sets the first @num input parameters for the function at @vcpu's entry point,
+ * per the C calling convention of the architecture, to the values given as
+ * variable args. Each of the variable args is expected to be of type uint64_t.
+ * The maximum value of @num is architecture-specific.
*/
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
-
-void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_sregs *sregs);
-void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_sregs *sregs);
-int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_sregs *sregs);
-void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_fpu *fpu);
-void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_fpu *fpu);
-void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
-void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg);
-#ifdef __KVM_HAVE_VCPU_EVENTS
-void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events);
-void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events);
-#endif
-#ifdef __x86_64__
-void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_nested_state *state);
-int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_nested_state *state, bool ignore_error);
-#endif
-void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid);
-
-int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
-int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr);
-int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd);
-int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test);
-int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
- void *val, bool write);
-int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
- void *val, bool write);
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
+
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
-int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr);
-int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr);
-int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val, bool write);
-int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val, bool write);
-
#define KVM_MAX_IRQ_ROUTES 4096
struct kvm_irq_routing *kvm_gsi_routing_create(void);
@@ -286,26 +630,6 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
const char *exit_reason_str(unsigned int exit_reason);
-void virt_pgd_alloc(struct kvm_vm *vm);
-
-/*
- * VM Virtual Page Map
- *
- * Input Args:
- * vm - Virtual Machine
- * vaddr - VM Virtual Address
- * paddr - VM Physical Address
- * memslot - Memory region slot for new virtual translation tables
- *
- * Output Args: None
- *
- * Return: None
- *
- * Within @vm, creates a virtual translation for the page starting
- * at @vaddr to the page starting at @paddr.
- */
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
-
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
@@ -313,55 +637,54 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
/*
- * Create a VM with reasonable defaults
- *
- * Input Args:
- * vcpuid - The id of the single VCPU to add to the VM.
- * extra_mem_pages - The number of extra pages to add (this will
- * decide how much extra space we will need to
- * setup the page tables using memslot 0)
- * guest_code - The vCPU's entry point
- *
- * Output Args: None
- *
- * Return:
- * Pointer to opaque structure that describes the created VM.
+ * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
+ * loads the test binary into guest memory and creates an IRQ chip (x86 only).
+ * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
+ * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
*/
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code);
+struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
+struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+ uint64_t nr_extra_pages);
+
+static inline struct kvm_vm *vm_create_barebones(void)
+{
+ return ____vm_create(VM_MODE_DEFAULT, 0);
+}
-/* Same as vm_create_default, but can be used for more than one vcpu */
-struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
- uint32_t num_percpu_pages, void *guest_code,
- uint32_t vcpuids[]);
+static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
+{
+ return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
+}
-/* Like vm_create_default_with_vcpus, but accepts mode and slot0 memory as a parameter */
-struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
- uint32_t num_percpu_pages, void *guest_code,
- uint32_t vcpuids[]);
+struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ uint64_t extra_mem_pages,
+ void *guest_code, struct kvm_vcpu *vcpus[]);
-/* Create a default VM without any vcpus. */
-struct kvm_vm *vm_create_without_vcpus(enum vm_guest_mode mode, uint64_t pages);
+static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
+ void *guest_code,
+ struct kvm_vcpu *vcpus[])
+{
+ return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
+ guest_code, vcpus);
+}
/*
- * Adds a vCPU with reasonable defaults (e.g. a stack)
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - The id of the VCPU to add to the VM.
- * guest_code - The vCPU's entry point
+ * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
+ * additional pages of guest memory. Returns the VM and vCPU (via out param).
*/
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
+struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+ uint64_t extra_mem_pages,
+ void *guest_code);
-bool vm_is_unrestricted_guest(struct kvm_vm *vm);
+static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+ void *guest_code)
+{
+ return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
+}
-unsigned int vm_get_page_size(struct kvm_vm *vm);
-unsigned int vm_get_page_shift(struct kvm_vm *vm);
-unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
-uint64_t vm_get_max_gfn(struct kvm_vm *vm);
-int vm_get_fd(struct kvm_vm *vm);
+struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
@@ -381,11 +704,6 @@ struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end);
-struct kvm_dirty_log *
-allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);
-
-int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
-
#define sync_global_to_guest(vm, g) ({ \
typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
memcpy(_p, &(g), sizeof(g)); \
@@ -396,11 +714,124 @@ int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
memcpy(&(g), _p, sizeof(g)); \
})
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid);
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);
+
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
+ uint8_t indent);
+
+static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
+ uint8_t indent)
+{
+ vcpu_arch_dump(stream, vcpu, indent);
+}
+
+/*
+ * Adds a vCPU with reasonable defaults (e.g. a stack)
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpu_id - The id of the VCPU to add to the VM.
+ * guest_code - The vCPU's entry point
+ */
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ void *guest_code);
+
+static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ void *guest_code)
+{
+ return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
+}
+
+/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
+
+static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
+ uint32_t vcpu_id)
+{
+ return vm_arch_vcpu_recreate(vm, vcpu_id);
+}
+
+void vcpu_arch_free(struct kvm_vcpu *vcpu);
+
+void virt_arch_pgd_alloc(struct kvm_vm *vm);
+
+static inline void virt_pgd_alloc(struct kvm_vm *vm)
+{
+ virt_arch_pgd_alloc(vm);
+}
+
+/*
+ * VM Virtual Page Map
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vaddr - VM Virtual Address
+ * paddr - VM Physical Address
+ * memslot - Memory region slot for new virtual translation tables
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Within @vm, creates a virtual translation for the page starting
+ * at @vaddr to the page starting at @paddr.
+ */
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
+
+static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+{
+ virt_arch_pg_map(vm, vaddr, paddr);
+}
+
+
+/*
+ * Address Guest Virtual to Guest Physical
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * gva - VM virtual address
+ *
+ * Output Args: None
+ *
+ * Return:
+ * Equivalent VM physical address
+ *
+ * Returns the VM physical address of the translated VM virtual
+ * address given by @gva.
+ */
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
+
+static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ return addr_arch_gva2gpa(vm, gva);
+}
+
+/*
+ * Virtual Translation Tables Dump
+ *
+ * Input Args:
+ * stream - Output FILE stream
+ * vm - Virtual Machine
+ * indent - Left margin indent amount
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Dumps to the FILE stream given by @stream, the contents of all the
+ * virtual translation tables for the VM given by @vm.
+ */
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
+
+static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+{
+ virt_arch_dump(stream, vm, indent);
+}
-int vm_get_stats_fd(struct kvm_vm *vm);
-int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid);
-uint32_t guest_get_vcpuid(void);
+static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
+{
+ return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
+}
#endif /* SELFTEST_KVM_UTIL_BASE_H */
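
Two of the reworked APIs above in combination: VM creation via the new vm_create_with_one_vcpu() wrapper, and a walk of the binary stats file using the descriptor helpers (the comments in the header explain why descriptors must be indexed via get_stats_descriptor() rather than array arithmetic). A sketch with error handling trimmed; guest_code is a placeholder, and only the first element of each stat is read:

static void dump_all_vm_stats(void *guest_code)
{
	struct kvm_stats_header header;
	struct kvm_stats_desc *descs, *desc;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t data;
	int stats_fd, i;

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	stats_fd = vm_get_stats_fd(vm);
	read_stats_header(stats_fd, &header);
	descs = read_stats_descriptors(stats_fd, &header);

	for (i = 0; i < header.num_desc; i++) {
		/* Descriptors are variable-sized, hence the helper. */
		desc = get_stats_descriptor(descs, i, &header);
		read_stat_data(stats_fd, &header, desc, &data, 1);
		pr_info("%s = %" PRIu64 "\n", desc->name, data);
	}

	free(descs);
	close(stats_fd);
	kvm_vm_free(vm);
}
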
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index a86f953d8d36..eaa88df0555a 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -25,21 +25,27 @@ struct perf_test_vcpu_args {
uint64_t pages;
/* Only used by the host userspace part of the vCPU thread */
- int vcpu_id;
+ struct kvm_vcpu *vcpu;
+ int vcpu_idx;
};
struct perf_test_args {
struct kvm_vm *vm;
+ /* The starting address and size of the guest test region. */
uint64_t gpa;
+ uint64_t size;
uint64_t guest_page_size;
int wr_fract;
+ /* Run vCPUs in L2 instead of L1, if the architecture supports it. */
+ bool nested;
+
struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
};
extern struct perf_test_args perf_test_args;
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access);
@@ -49,5 +55,9 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract);
void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
void perf_test_join_vcpu_threads(int vcpus);
+void perf_test_guest_code(uint32_t vcpu_id);
+
+uint64_t perf_test_nested_pages(int nr_vcpus);
+void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
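
With vcpu_args now carrying both the vCPU object and its dense index, a worker handed to perf_test_start_vcpu_threads() typically reduces to the following shape (a minimal sketch; host_quit is assumed to be a test-local flag, as in the callers earlier in this diff):

static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;

	while (!READ_ONCE(host_quit)) {
		vcpu_run(vcpu);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "vCPU %d: unexpected exit, reason: %s",
			    vcpu_args->vcpu_idx,
			    exit_reason_str(vcpu->run->exit_reason));
	}
}
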
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index eca5c622efd2..d00d213c3805 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -38,26 +38,6 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
KVM_REG_RISCV_TIMER_REG(name), \
KVM_REG_SIZE_U64)
-static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
- unsigned long *addr)
-{
- struct kvm_one_reg reg;
-
- reg.id = id;
- reg.addr = (unsigned long)addr;
- vcpu_get_reg(vm, vcpuid, &reg);
-}
-
-static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
- unsigned long val)
-{
- struct kvm_one_reg reg;
-
- reg.id = id;
- reg.addr = (unsigned long)&val;
- vcpu_set_reg(vm, vcpuid, &reg);
-}
-
/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT 39
@@ -119,10 +99,12 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
#define SATP_ASID_SHIFT 44
#define SATP_ASID_MASK _AC(0xFFFF, UL)
-#define SBI_EXT_EXPERIMENTAL_START 0x08000000
-#define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF
+#define SBI_EXT_EXPERIMENTAL_START 0x08000000
+#define SBI_EXT_EXPERIMENTAL_END 0x08FFFFFF
-#define KVM_RISCV_SELFTESTS_SBI_EXT SBI_EXT_EXPERIMENTAL_END
+#define KVM_RISCV_SELFTESTS_SBI_EXT SBI_EXT_EXPERIMENTAL_END
+#define KVM_RISCV_SELFTESTS_SBI_UCALL 0
+#define KVM_RISCV_SELFTESTS_SBI_UNEXP 1
struct sbiret {
long error;
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 99e0dcdc923f..5c5a88180b6c 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -34,6 +34,13 @@ static inline int _no_printf(const char *format, ...) { return 0; }
#endif
void print_skip(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+#define __TEST_REQUIRE(f, fmt, ...) \
+do { \
+ if (!(f)) \
+ ksft_exit_skip("- " fmt "\n", ##__VA_ARGS__); \
+} while (0)
+
+#define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f)
ssize_t test_write(int fd, const void *buf, size_t count);
ssize_t test_read(int fd, void *buf, size_t count);
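
TEST_REQUIRE() converts an unmet precondition into a ksft skip rather than a failure; typical use at the top of a test's main() (the capability shown is illustrative):

int main(int argc, char *argv[])
{
	/* Skip, rather than fail, when the host kernel lacks support. */
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2));

	/* ... rest of the test ... */
	return 0;
}
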
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index 9eecc9d40b79..ee79d180e07e 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -6,6 +6,7 @@
*/
#ifndef SELFTEST_KVM_UCALL_COMMON_H
#define SELFTEST_KVM_UCALL_COMMON_H
+#include "test_util.h"
/* Common ucalls */
enum {
@@ -16,7 +17,7 @@ enum {
UCALL_UNHANDLED,
};
-#define UCALL_MAX_ARGS 6
+#define UCALL_MAX_ARGS 7
struct ucall {
uint64_t cmd;
@@ -26,17 +27,26 @@ struct ucall {
void ucall_init(struct kvm_vm *vm, void *arg);
void ucall_uninit(struct kvm_vm *vm);
void ucall(uint64_t cmd, int nargs, ...);
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
#define GUEST_DONE() ucall(UCALL_DONE, 0)
-#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) do { \
- if (!(_condition)) \
- ucall(UCALL_ABORT, 2 + _nargs, \
- "Failed guest assert: " \
- _condstr, __LINE__, _args); \
+
+enum guest_assert_builtin_args {
+ GUEST_ERROR_STRING,
+ GUEST_FILE,
+ GUEST_LINE,
+ GUEST_ASSERT_BUILTIN_NARGS
+};
+
+#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) \
+do { \
+ if (!(_condition)) \
+ ucall(UCALL_ABORT, GUEST_ASSERT_BUILTIN_NARGS + _nargs, \
+ "Failed guest assert: " _condstr, \
+ __FILE__, __LINE__, ##_args); \
} while (0)
#define GUEST_ASSERT(_condition) \
@@ -56,4 +66,45 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b)
+#define __REPORT_GUEST_ASSERT(_ucall, fmt, _args...) \
+ TEST_FAIL("%s at %s:%ld\n" fmt, \
+ (const char *)(_ucall).args[GUEST_ERROR_STRING], \
+ (const char *)(_ucall).args[GUEST_FILE], \
+ (_ucall).args[GUEST_LINE], \
+ ##_args)
+
+#define GUEST_ASSERT_ARG(ucall, i) ((ucall).args[GUEST_ASSERT_BUILTIN_NARGS + i])
+
+#define REPORT_GUEST_ASSERT(ucall) \
+ __REPORT_GUEST_ASSERT((ucall), "")
+
+#define REPORT_GUEST_ASSERT_1(ucall, fmt) \
+ __REPORT_GUEST_ASSERT((ucall), \
+ fmt, \
+ GUEST_ASSERT_ARG((ucall), 0))
+
+#define REPORT_GUEST_ASSERT_2(ucall, fmt) \
+ __REPORT_GUEST_ASSERT((ucall), \
+ fmt, \
+ GUEST_ASSERT_ARG((ucall), 0), \
+ GUEST_ASSERT_ARG((ucall), 1))
+
+#define REPORT_GUEST_ASSERT_3(ucall, fmt) \
+ __REPORT_GUEST_ASSERT((ucall), \
+ fmt, \
+ GUEST_ASSERT_ARG((ucall), 0), \
+ GUEST_ASSERT_ARG((ucall), 1), \
+ GUEST_ASSERT_ARG((ucall), 2))
+
+#define REPORT_GUEST_ASSERT_4(ucall, fmt) \
+ __REPORT_GUEST_ASSERT((ucall), \
+ fmt, \
+ GUEST_ASSERT_ARG((ucall), 0), \
+ GUEST_ASSERT_ARG((ucall), 1), \
+ GUEST_ASSERT_ARG((ucall), 2), \
+ GUEST_ASSERT_ARG((ucall), 3))
+
+#define REPORT_GUEST_ASSERT_N(ucall, fmt, args...) \
+ __REPORT_GUEST_ASSERT((ucall), fmt, ##args)
+
#endif /* SELFTEST_KVM_UCALL_COMMON_H */
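A minimal sketch of the intended guest/host pairing (illustrative only; vcpu comes from the reworked struct kvm_vcpu API, and the guest/host halves live in different functions in a real test):

	struct ucall uc;

	/* Guest side: GUEST_ASSERT_EQ() forwards both values to the host. */
	GUEST_ASSERT_EQ(actual, expected);

	/* Host side: fetch the ucall and re-report it with its arguments. */
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT_2(uc, "%#lx != %#lx");
		break;
	case UCALL_DONE:
		break;
	}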
diff --git a/tools/testing/selftests/kvm/include/x86_64/apic.h b/tools/testing/selftests/kvm/include/x86_64/apic.h
index ac88557dcc9a..bed316fdecd5 100644
--- a/tools/testing/selftests/kvm/include/x86_64/apic.h
+++ b/tools/testing/selftests/kvm/include/x86_64/apic.h
@@ -35,6 +35,7 @@
#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_IRR 0x200
#define APIC_ICR 0x300
+#define APIC_LVTCMCI 0x2f0
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
#define APIC_DEST_ALLBUT 0xC0000
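Editorial sketch of how the new offset might be consumed, assuming the header's existing xapic_write_reg() and APIC_DM_FIXED definitions; the vector choice is hypothetical:

	uint32_t cmci_vector = 0x30;	/* hypothetical free vector */

	/* Route corrected machine check interrupts as a fixed interrupt. */
	xapic_write_reg(APIC_LVTCMCI, APIC_DM_FIXED | cmci_vector);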
diff --git a/tools/testing/selftests/kvm/include/x86_64/evmcs.h b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
index cc5d14a45702..3c9260f8e116 100644
--- a/tools/testing/selftests/kvm/include/x86_64/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
@@ -241,7 +241,7 @@ struct hv_enlightened_vmcs {
extern struct hv_enlightened_vmcs *current_evmcs;
extern struct hv_vp_assist_page *current_vp_assist;
-int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
+int vcpu_enable_evmcs(struct kvm_vcpu *vcpu);
static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
{
diff --git a/tools/testing/selftests/kvm/include/x86_64/mce.h b/tools/testing/selftests/kvm/include/x86_64/mce.h
new file mode 100644
index 000000000000..6119321f3f5d
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/mce.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tools/testing/selftests/kvm/include/x86_64/mce.h
+ *
+ * Copyright (C) 2022, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_MCE_H
+#define SELFTEST_KVM_MCE_H
+
+#define MCG_CTL_P BIT_ULL(8) /* MCG_CTL register available */
+#define MCG_SER_P BIT_ULL(24) /* MCA recovery/new status bits */
+#define MCG_LMCE_P BIT_ULL(27) /* Local machine check supported */
+#define MCG_CMCI_P BIT_ULL(10) /* CMCI supported */
+#define KVM_MAX_MCE_BANKS 32
+#define MCG_CAP_BANKS_MASK 0xff /* Bit 0-7 of the MCG_CAP register are #banks */
+#define MCI_STATUS_VAL (1ULL << 63) /* valid error */
+#define MCI_STATUS_UC (1ULL << 61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL << 60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL << 59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL << 58) /* addr reg. valid */
+#define MCM_ADDR_PHYS 2 /* physical address */
+#define MCI_CTL2_CMCI_EN BIT_ULL(30)
+
+#endif /* SELFTEST_KVM_MCE_H */
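A hedged sketch of how these constants are typically consumed when arming MCE on a vCPU (the KVM_X86_SETUP_MCE usage is illustrative, not taken from this patch):

	/* Advertise 32 banks plus CMCI/SER support, then arm MCE. */
	uint64_t mcg_cap = MCG_CTL_P | MCG_SER_P | MCG_CMCI_P | KVM_MAX_MCE_BANKS;

	vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_cap);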
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index d0d51adec76e..45edf45821d0 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -15,8 +15,12 @@
#include <asm/msr-index.h>
#include <asm/prctl.h>
+#include <linux/stringify.h>
+
#include "../kvm_util.h"
+#define NMI_VECTOR 0x02
+
#define X86_EFLAGS_FIXED (1u << 1)
#define X86_CR4_VME (1ul << 0)
@@ -41,24 +45,122 @@
#define X86_CR4_SMAP (1ul << 21)
#define X86_CR4_PKE (1ul << 22)
-/* CPUID.1.ECX */
-#define CPUID_VMX (1ul << 5)
-#define CPUID_SMX (1ul << 6)
-#define CPUID_PCID (1ul << 17)
-#define CPUID_XSAVE (1ul << 26)
+/* Note, these are ordered alphabetically to match kvm_cpuid_entry2. Eww. */
+enum cpuid_output_regs {
+ KVM_CPUID_EAX,
+ KVM_CPUID_EBX,
+ KVM_CPUID_ECX,
+ KVM_CPUID_EDX
+};
+
+/*
+ * Pack the information into a 64-bit value so that each X86_FEATURE_XXX can be
+ * passed by value with no overhead.
+ */
+struct kvm_x86_cpu_feature {
+ u32 function;
+ u16 index;
+ u8 reg;
+ u8 bit;
+};
+#define KVM_X86_CPU_FEATURE(fn, idx, gpr, __bit) \
+({ \
+ struct kvm_x86_cpu_feature feature = { \
+ .function = fn, \
+ .index = idx, \
+ .reg = KVM_CPUID_##gpr, \
+ .bit = __bit, \
+ }; \
+ \
+ feature; \
+})
-/* CPUID.7.EBX */
-#define CPUID_FSGSBASE (1ul << 0)
-#define CPUID_SMEP (1ul << 7)
-#define CPUID_SMAP (1ul << 20)
+/*
+ * Basic Leafs, a.k.a. Intel defined
+ */
+#define X86_FEATURE_MWAIT KVM_X86_CPU_FEATURE(0x1, 0, ECX, 3)
+#define X86_FEATURE_VMX KVM_X86_CPU_FEATURE(0x1, 0, ECX, 5)
+#define X86_FEATURE_SMX KVM_X86_CPU_FEATURE(0x1, 0, ECX, 6)
+#define X86_FEATURE_PDCM KVM_X86_CPU_FEATURE(0x1, 0, ECX, 15)
+#define X86_FEATURE_PCID KVM_X86_CPU_FEATURE(0x1, 0, ECX, 17)
+#define X86_FEATURE_X2APIC KVM_X86_CPU_FEATURE(0x1, 0, ECX, 21)
+#define X86_FEATURE_MOVBE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 22)
+#define X86_FEATURE_TSC_DEADLINE_TIMER KVM_X86_CPU_FEATURE(0x1, 0, ECX, 24)
+#define X86_FEATURE_XSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 26)
+#define X86_FEATURE_OSXSAVE KVM_X86_CPU_FEATURE(0x1, 0, ECX, 27)
+#define X86_FEATURE_RDRAND KVM_X86_CPU_FEATURE(0x1, 0, ECX, 30)
+#define X86_FEATURE_MCE KVM_X86_CPU_FEATURE(0x1, 0, EDX, 7)
+#define X86_FEATURE_APIC KVM_X86_CPU_FEATURE(0x1, 0, EDX, 9)
+#define X86_FEATURE_CLFLUSH KVM_X86_CPU_FEATURE(0x1, 0, EDX, 19)
+#define X86_FEATURE_XMM KVM_X86_CPU_FEATURE(0x1, 0, EDX, 25)
+#define X86_FEATURE_XMM2 KVM_X86_CPU_FEATURE(0x1, 0, EDX, 26)
+#define X86_FEATURE_FSGSBASE KVM_X86_CPU_FEATURE(0x7, 0, EBX, 0)
+#define X86_FEATURE_TSC_ADJUST KVM_X86_CPU_FEATURE(0x7, 0, EBX, 1)
+#define X86_FEATURE_HLE KVM_X86_CPU_FEATURE(0x7, 0, EBX, 4)
+#define X86_FEATURE_SMEP KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
+#define X86_FEATURE_INVPCID KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
+#define X86_FEATURE_RTM KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
+#define X86_FEATURE_MPX KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
+#define X86_FEATURE_SMAP KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
+#define X86_FEATURE_PCOMMIT KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
+#define X86_FEATURE_CLFLUSHOPT KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
+#define X86_FEATURE_CLWB KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
+#define X86_FEATURE_UMIP KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
+#define X86_FEATURE_PKU KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
+#define X86_FEATURE_LA57 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
+#define X86_FEATURE_RDPID KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
+#define X86_FEATURE_SHSTK KVM_X86_CPU_FEATURE(0x7, 0, ECX, 7)
+#define X86_FEATURE_IBT KVM_X86_CPU_FEATURE(0x7, 0, EDX, 20)
+#define X86_FEATURE_AMX_TILE KVM_X86_CPU_FEATURE(0x7, 0, EDX, 24)
+#define X86_FEATURE_SPEC_CTRL KVM_X86_CPU_FEATURE(0x7, 0, EDX, 26)
+#define X86_FEATURE_ARCH_CAPABILITIES KVM_X86_CPU_FEATURE(0x7, 0, EDX, 29)
+#define X86_FEATURE_PKS KVM_X86_CPU_FEATURE(0x7, 0, ECX, 31)
+#define X86_FEATURE_XTILECFG KVM_X86_CPU_FEATURE(0xD, 0, EAX, 17)
+#define X86_FEATURE_XTILEDATA KVM_X86_CPU_FEATURE(0xD, 0, EAX, 18)
+#define X86_FEATURE_XSAVES KVM_X86_CPU_FEATURE(0xD, 1, EAX, 3)
+#define X86_FEATURE_XFD KVM_X86_CPU_FEATURE(0xD, 1, EAX, 4)
-/* CPUID.7.ECX */
-#define CPUID_UMIP (1ul << 2)
-#define CPUID_PKU (1ul << 3)
-#define CPUID_LA57 (1ul << 16)
+/*
+ * Extended Leafs, a.k.a. AMD defined
+ */
+#define X86_FEATURE_SVM KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
+#define X86_FEATURE_NX KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
+#define X86_FEATURE_GBPAGES KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
+#define X86_FEATURE_RDTSCP KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
+#define X86_FEATURE_LM KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 29)
+#define X86_FEATURE_RDPRU KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 4)
+#define X86_FEATURE_AMD_IBPB KVM_X86_CPU_FEATURE(0x80000008, 0, EBX, 12)
+#define X86_FEATURE_NPT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 0)
+#define X86_FEATURE_LBRV KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 1)
+#define X86_FEATURE_NRIPS KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 3)
+#define X86_FEATURE_TSCRATEMSR KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 4)
+#define X86_FEATURE_PAUSEFILTER KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
+#define X86_FEATURE_PFTHRESHOLD KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
+#define X86_FEATURE_VGIF KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
+#define X86_FEATURE_SEV KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
+#define X86_FEATURE_SEV_ES KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
-/* CPUID.0x8000_0001.EDX */
-#define CPUID_GBPAGES (1ul << 26)
+/*
+ * KVM defined paravirt features.
+ */
+#define X86_FEATURE_KVM_CLOCKSOURCE KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 0)
+#define X86_FEATURE_KVM_NOP_IO_DELAY KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 1)
+#define X86_FEATURE_KVM_MMU_OP KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 2)
+#define X86_FEATURE_KVM_CLOCKSOURCE2 KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 3)
+#define X86_FEATURE_KVM_ASYNC_PF KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 4)
+#define X86_FEATURE_KVM_STEAL_TIME KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 5)
+#define X86_FEATURE_KVM_PV_EOI KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 6)
+#define X86_FEATURE_KVM_PV_UNHALT KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 7)
+/* Bit 8 apparently isn't used?!?! */
+#define X86_FEATURE_KVM_PV_TLB_FLUSH KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 9)
+#define X86_FEATURE_KVM_ASYNC_PF_VMEXIT KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 10)
+#define X86_FEATURE_KVM_PV_SEND_IPI KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 11)
+#define X86_FEATURE_KVM_POLL_CONTROL KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 12)
+#define X86_FEATURE_KVM_PV_SCHED_YIELD KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 13)
+#define X86_FEATURE_KVM_ASYNC_PF_INT KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 14)
+#define X86_FEATURE_KVM_MSI_EXT_DEST_ID KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 15)
+#define X86_FEATURE_KVM_HC_MAP_GPA_RANGE KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 16)
+#define X86_FEATURE_KVM_MIGRATION_CONTROL KVM_X86_CPU_FEATURE(0x40000001, 0, EAX, 17)
/* Page table bitfield declarations */
#define PTE_PRESENT_MASK BIT_ULL(0)
@@ -300,10 +402,13 @@ static inline void outl(uint16_t port, uint32_t value)
__asm__ __volatile__("outl %%eax, %%dx" : : "d"(port), "a"(value));
}
-static inline void cpuid(uint32_t *eax, uint32_t *ebx,
- uint32_t *ecx, uint32_t *edx)
+static inline void __cpuid(uint32_t function, uint32_t index,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
{
- /* ecx is often an input as well as an output. */
+ *eax = function;
+ *ecx = index;
+
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
@@ -313,6 +418,24 @@ static inline void cpuid(uint32_t *eax, uint32_t *ebx,
: "memory");
}
+static inline void cpuid(uint32_t function,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ return __cpuid(function, 0, eax, ebx, ecx, edx);
+}
+
+static inline bool this_cpu_has(struct kvm_x86_cpu_feature feature)
+{
+ uint32_t gprs[4];
+
+ __cpuid(feature.function, feature.index,
+ &gprs[KVM_CPUID_EAX], &gprs[KVM_CPUID_EBX],
+ &gprs[KVM_CPUID_ECX], &gprs[KVM_CPUID_EDX]);
+
+ return gprs[feature.reg] & BIT(feature.bit);
+}
+
#define SET_XMM(__var, __xmm) \
asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm)
@@ -385,6 +508,21 @@ static inline void cpu_relax(void)
asm volatile("rep; nop" ::: "memory");
}
+#define vmmcall() \
+ __asm__ __volatile__( \
+ "vmmcall\n" \
+ )
+
+#define ud2() \
+ __asm__ __volatile__( \
+ "ud2\n" \
+ )
+
+#define hlt() \
+ __asm__ __volatile__( \
+ "hlt\n" \
+ )
+
bool is_intel_cpu(void);
bool is_amd_cpu(void);
@@ -405,39 +543,198 @@ static inline unsigned int x86_model(unsigned int eax)
return ((eax >> 12) & 0xf0) | ((eax >> 4) & 0x0f);
}
-struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_x86_state *state);
+struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu);
+void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state);
void kvm_x86_state_cleanup(struct kvm_x86_state *state);
-struct kvm_msr_list *kvm_get_msr_index_list(void);
+const struct kvm_msr_list *kvm_get_msr_index_list(void);
+const struct kvm_msr_list *kvm_get_feature_msr_index_list(void);
+bool kvm_msr_is_in_save_restore_list(uint32_t msr_index);
uint64_t kvm_get_feature_msr(uint64_t msr_index);
-struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
-int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_cpuid2 *cpuid);
-void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_cpuid2 *cpuid);
+static inline void vcpu_msrs_get(struct kvm_vcpu *vcpu,
+ struct kvm_msrs *msrs)
+{
+ int r = __vcpu_ioctl(vcpu, KVM_GET_MSRS, msrs);
+
+ TEST_ASSERT(r == msrs->nmsrs,
+ "KVM_GET_MSRS failed, r: %i (failed on MSR %x)",
+ r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
+}
+static inline void vcpu_msrs_set(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs)
+{
+ int r = __vcpu_ioctl(vcpu, KVM_SET_MSRS, msrs);
+
+ TEST_ASSERT(r == msrs->nmsrs,
+		    "KVM_SET_MSRS failed, r: %i (failed on MSR %x)",
+ r, r < 0 || r >= msrs->nmsrs ? -1 : msrs->entries[r].index);
+}
+static inline void vcpu_debugregs_get(struct kvm_vcpu *vcpu,
+ struct kvm_debugregs *debugregs)
+{
+ vcpu_ioctl(vcpu, KVM_GET_DEBUGREGS, debugregs);
+}
+static inline void vcpu_debugregs_set(struct kvm_vcpu *vcpu,
+ struct kvm_debugregs *debugregs)
+{
+ vcpu_ioctl(vcpu, KVM_SET_DEBUGREGS, debugregs);
+}
+static inline void vcpu_xsave_get(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *xsave)
+{
+ vcpu_ioctl(vcpu, KVM_GET_XSAVE, xsave);
+}
+static inline void vcpu_xsave2_get(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *xsave)
+{
+ vcpu_ioctl(vcpu, KVM_GET_XSAVE2, xsave);
+}
+static inline void vcpu_xsave_set(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *xsave)
+{
+ vcpu_ioctl(vcpu, KVM_SET_XSAVE, xsave);
+}
+static inline void vcpu_xcrs_get(struct kvm_vcpu *vcpu,
+ struct kvm_xcrs *xcrs)
+{
+ vcpu_ioctl(vcpu, KVM_GET_XCRS, xcrs);
+}
+static inline void vcpu_xcrs_set(struct kvm_vcpu *vcpu, struct kvm_xcrs *xcrs)
+{
+ vcpu_ioctl(vcpu, KVM_SET_XCRS, xcrs);
+}
+
+const struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu);
-struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
+bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_feature feature);
+
+static inline bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
+{
+ return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
+}
-static inline struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_entry(uint32_t function)
+static inline size_t kvm_cpuid2_size(int nr_entries)
{
- return kvm_get_supported_cpuid_index(function, 0);
+ return sizeof(struct kvm_cpuid2) +
+ sizeof(struct kvm_cpuid_entry2) * nr_entries;
}
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
-int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value);
-void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value);
+/*
+ * Allocate a "struct kvm_cpuid2" instance, with the 0-length array of
+ * entries sized to hold @nr_entries. The caller is responsible for freeing
+ * the struct.
+ */
+static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
+{
+ struct kvm_cpuid2 *cpuid;
+
+ cpuid = malloc(kvm_cpuid2_size(nr_entries));
+ TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");
+
+ cpuid->nent = nr_entries;
+
+ return cpuid;
+}
+
+const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
+ uint32_t function, uint32_t index);
+void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid);
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu);
+
+static inline struct kvm_cpuid_entry2 *__vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
+ uint32_t function,
+ uint32_t index)
+{
+ return (struct kvm_cpuid_entry2 *)get_cpuid_entry(vcpu->cpuid,
+ function, index);
+}
+
+static inline struct kvm_cpuid_entry2 *vcpu_get_cpuid_entry(struct kvm_vcpu *vcpu,
+ uint32_t function)
+{
+ return __vcpu_get_cpuid_entry(vcpu, function, 0);
+}
+
+static inline int __vcpu_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
+ r = __vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
+ if (r)
+ return r;
+
+ /* On success, refresh the cache to pick up adjustments made by KVM. */
+ vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
+ return 0;
+}
+
+static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ TEST_ASSERT(vcpu->cpuid, "Must do vcpu_init_cpuid() first");
+ vcpu_ioctl(vcpu, KVM_SET_CPUID2, vcpu->cpuid);
+
+ /* Refresh the cache to pick up adjustments made by KVM. */
+ vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
+}
+
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
+
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
+void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
+ struct kvm_x86_cpu_feature feature,
+ bool set);
+
+static inline void vcpu_set_cpuid_feature(struct kvm_vcpu *vcpu,
+ struct kvm_x86_cpu_feature feature)
+{
+	vcpu_set_or_clear_cpuid_feature(vcpu, feature, true);
+}
+
+static inline void vcpu_clear_cpuid_feature(struct kvm_vcpu *vcpu,
+ struct kvm_x86_cpu_feature feature)
+{
+ vcpu_set_or_clear_cpuid_feature(vcpu, feature, false);
+}
+
+static inline const struct kvm_cpuid_entry2 *__kvm_get_supported_cpuid_entry(uint32_t function,
+ uint32_t index)
+{
+ return get_cpuid_entry(kvm_get_supported_cpuid(), function, index);
+}
+
+static inline const struct kvm_cpuid_entry2 *kvm_get_supported_cpuid_entry(uint32_t function)
+{
+ return __kvm_get_supported_cpuid_entry(function, 0);
+}
+
+uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index);
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value);
+
+static inline void vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index,
+ uint64_t msr_value)
+{
+ int r = _vcpu_set_msr(vcpu, msr_index, msr_value);
+
+ TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
+}
+
+static inline uint32_t kvm_get_cpuid_max_basic(void)
+{
+ return kvm_get_supported_cpuid_entry(0)->eax;
+}
+
+static inline uint32_t kvm_get_cpuid_max_extended(void)
+{
+ return kvm_get_supported_cpuid_entry(0x80000000)->eax;
+}
-uint32_t kvm_get_cpuid_max_basic(void);
-uint32_t kvm_get_cpuid_max_extended(void);
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
+bool vm_is_unrestricted_guest(struct kvm_vm *vm);
struct ex_regs {
uint64_t rax, rcx, rdx, rbx;
@@ -452,43 +749,112 @@ struct ex_regs {
};
void vm_init_descriptor_tables(struct kvm_vm *vm);
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);
void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *));
-uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr);
-void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
- uint64_t pte);
+/* If a toddler were to say "abracadabra". */
+#define KVM_EXCEPTION_MAGIC 0xabacadabaull
/*
- * get_cpuid() - find matching CPUID entry and return pointer to it.
- */
-struct kvm_cpuid_entry2 *get_cpuid(struct kvm_cpuid2 *cpuid, uint32_t function,
- uint32_t index);
-/*
- * set_cpuid() - overwrites a matching cpuid entry with the provided value.
- * matches based on ent->function && ent->index. returns true
- * if a match was found and successfully overwritten.
- * @cpuid: the kvm cpuid list to modify.
- * @ent: cpuid entry to insert
+ * KVM selftest exception fixup uses registers to coordinate with the exception
+ * handler, versus the kernel's in-memory tables and KVM-Unit-Tests's in-memory
+ * per-CPU data. Using only registers avoids having to map memory into the
+ * guest, doesn't require a valid, stable GS.base, and reduces the risk of
+ * recursive faults when accessing memory in the handler. The downside to
+ * using registers is that it restricts what registers can be used by the actual
+ * instruction. But, selftests are 64-bit only, making register pressure a
+ * minor concern. Use r9-r11 as they are volatile, i.e. don't need to be saved
+ * by the callee, and except for r11 are not implicit parameters to any
+ * instructions. Ideally, fixup would use r8-r10 and thus avoid implicit
+ * parameters entirely, but Hyper-V's hypercall ABI uses r8 and testing Hyper-V
+ * is higher priority than testing non-faulting SYSCALL/SYSRET.
+ *
+ * Note, the fixup handler deliberately does not handle #DE, i.e. the vector
+ * is guaranteed to be non-zero on fault.
+ *
+ * REGISTER INPUTS:
+ * r9 = MAGIC
+ * r10 = RIP
+ * r11 = new RIP on fault
+ *
+ * REGISTER OUTPUTS:
+ * r9 = exception vector (non-zero)
*/
-bool set_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *ent);
+#define KVM_ASM_SAFE(insn) \
+ "mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t" \
+ "lea 1f(%%rip), %%r10\n\t" \
+ "lea 2f(%%rip), %%r11\n\t" \
+ "1: " insn "\n\t" \
+	"movb $0, %[vector]\n\t"				\
+ "jmp 3f\n\t" \
+ "2:\n\t" \
+ "mov %%r9b, %[vector]\n\t" \
+ "3:\n\t"
+
+#define KVM_ASM_SAFE_OUTPUTS(v) [vector] "=qm"(v)
+#define KVM_ASM_SAFE_CLOBBERS "r9", "r10", "r11"
+
+#define kvm_asm_safe(insn, inputs...) \
+({ \
+ uint8_t vector; \
+ \
+ asm volatile(KVM_ASM_SAFE(insn) \
+ : KVM_ASM_SAFE_OUTPUTS(vector) \
+ : inputs \
+ : KVM_ASM_SAFE_CLOBBERS); \
+ vector; \
+})
+
+static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
+{
+ uint8_t vector;
+ uint32_t a, d;
+
+ asm volatile(KVM_ASM_SAFE("rdmsr")
+ : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector)
+ : "c"(msr)
+ : KVM_ASM_SAFE_CLOBBERS);
+
+ *val = (uint64_t)a | ((uint64_t)d << 32);
+ return vector;
+}
+
+static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
+{
+ return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
+}
+
+uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr);
+void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr, uint64_t pte);
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
uint64_t a3);
-struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
-void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
-struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
-void vm_xsave_req_perm(int bit);
+void __vm_xsave_require_permission(int bit, const char *name);
-enum x86_page_size {
- X86_PAGE_SIZE_4K = 0,
- X86_PAGE_SIZE_2M,
- X86_PAGE_SIZE_1G,
+#define vm_xsave_require_permission(perm) \
+ __vm_xsave_require_permission(perm, #perm)
+
+enum pg_level {
+ PG_LEVEL_NONE,
+ PG_LEVEL_4K,
+ PG_LEVEL_2M,
+ PG_LEVEL_1G,
+ PG_LEVEL_512G,
+ PG_LEVEL_NUM
};
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- enum x86_page_size page_size);
+
+#define PG_LEVEL_SHIFT(_level) ((_level - 1) * 9 + 12)
+#define PG_LEVEL_SIZE(_level) (1ull << PG_LEVEL_SHIFT(_level))
+
+#define PG_SIZE_4K PG_LEVEL_SIZE(PG_LEVEL_4K)
+#define PG_SIZE_2M PG_LEVEL_SIZE(PG_LEVEL_2M)
+#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
+
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
/*
* Basic CPU control in CR0
@@ -505,9 +871,6 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
#define X86_CR0_CD (1UL<<30) /* Cache Disable */
#define X86_CR0_PG (1UL<<31) /* Paging */
-/* VMX_EPT_VPID_CAP bits */
-#define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21)
-
#define XSTATE_XTILE_CFG_BIT 17
#define XSTATE_XTILE_DATA_BIT 18
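Putting the new framework together, a conversion away from the removed CPUID_* masks could look like this sketch (MSR_IA32_TSC comes from the included msr-index.h; the pairing of host-side and guest-side calls is illustrative):

	uint64_t val;

	/* Host: skip the test unless KVM advertises nested VMX. */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	/* Guest: raw CPUID query on the current (virtual) CPU. */
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));

	/* Guest: a faulting RDMSR now returns the vector instead of dying. */
	GUEST_ASSERT(!rdmsr_safe(MSR_IA32_TSC, &val));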
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm.h b/tools/testing/selftests/kvm/include/x86_64/svm.h
index 2225e5077350..c8343ff84f7f 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm.h
@@ -218,8 +218,6 @@ struct __attribute__ ((__packed__)) vmcb {
struct vmcb_save_area save;
};
-#define SVM_CPUID_FUNC 0x8000000a
-
#define SVM_VM_CR_SVM_DISABLE 4
#define SVM_SELECTOR_S_SHIFT 4
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
index a25aabd8f5e7..a339b537a575 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -13,9 +13,8 @@
#include "svm.h"
#include "processor.h"
-#define CPUID_SVM_BIT 2
-#define CPUID_SVM BIT_ULL(CPUID_SVM_BIT)
-
+#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_HLT 0x078
#define SVM_EXIT_MSR 0x07c
#define SVM_EXIT_VMMCALL 0x081
@@ -36,21 +35,19 @@ struct svm_test_data {
uint64_t msr_gpa;
};
+#define stgi() \
+ __asm__ __volatile__( \
+ "stgi\n" \
+ )
+
+#define clgi() \
+ __asm__ __volatile__( \
+ "clgi\n" \
+ )
+
struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
-bool nested_svm_supported(void);
-void nested_svm_check_supported(void);
-
-static inline bool cpu_has_svm(void)
-{
- u32 eax = 0x80000001, ecx;
-
- asm("cpuid" :
- "=a" (eax), "=c" (ecx) : "0" (eax) : "ebx", "edx");
-
- return ecx & CPUID_SVM;
-}
int open_sev_dev_path_or_exit(void);
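With cpu_has_svm() removed, callers presumably migrate to the generic CPUID feature framework from processor.h; a plausible one-line conversion (sketch):

	/* Was: if (!cpu_has_svm()) { print_skip(...); exit(KSFT_SKIP); } */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));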
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 583ceb0d1457..99fa1410964c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -96,6 +96,9 @@
#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
#define VMX_MISC_SAVE_EFER_LMA 0x00000020
+#define VMX_EPT_VPID_CAP_1G_PAGES 0x00020000
+#define VMX_EPT_VPID_CAP_AD_BITS 0x00200000
+
#define EXIT_REASON_FAILED_VMENTRY 0x80000000
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
@@ -604,8 +607,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);
-bool nested_vmx_supported(void);
-void nested_vmx_check_supported(void);
+bool ept_1g_pages_supported(void);
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr);
@@ -613,6 +615,8 @@ void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t nested_paddr, uint64_t paddr, uint64_t size);
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot);
+void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+ uint64_t addr, uint64_t size);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
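The new 1G helper pairs naturally with the capability check; a sketch assuming vmx and vm were prepared via vcpu_alloc_vmx() and prepare_eptp():

	/* Identity-map the first 1GiB with a single huge EPT entry. */
	TEST_REQUIRE(ept_1g_pages_supported());
	nested_identity_map_1g(vmx, vm, 0, PG_SIZE_1G);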
diff --git a/tools/testing/selftests/kvm/kvm_binary_stats_test.c b/tools/testing/selftests/kvm/kvm_binary_stats_test.c
index 17f65d514915..0b45ac593387 100644
--- a/tools/testing/selftests/kvm/kvm_binary_stats_test.c
+++ b/tools/testing/selftests/kvm/kvm_binary_stats_test.c
@@ -26,163 +26,167 @@ static void stats_test(int stats_fd)
int i;
size_t size_desc;
size_t size_data = 0;
- struct kvm_stats_header *header;
+ struct kvm_stats_header header;
char *id;
struct kvm_stats_desc *stats_desc;
u64 *stats_data;
struct kvm_stats_desc *pdesc;
+ u32 type, unit, base;
/* Read kvm stats header */
- header = malloc(sizeof(*header));
- TEST_ASSERT(header, "Allocate memory for stats header");
+ read_stats_header(stats_fd, &header);
- ret = read(stats_fd, header, sizeof(*header));
- TEST_ASSERT(ret == sizeof(*header), "Read stats header");
- size_desc = sizeof(*stats_desc) + header->name_size;
+ size_desc = get_stats_descriptor_size(&header);
/* Read kvm stats id string */
- id = malloc(header->name_size);
+ id = malloc(header.name_size);
TEST_ASSERT(id, "Allocate memory for id string");
- ret = read(stats_fd, id, header->name_size);
- TEST_ASSERT(ret == header->name_size, "Read id string");
+
+ ret = read(stats_fd, id, header.name_size);
+ TEST_ASSERT(ret == header.name_size, "Read id string");
/* Check id string, that should start with "kvm" */
- TEST_ASSERT(!strncmp(id, "kvm", 3) && strlen(id) < header->name_size,
- "Invalid KVM stats type, id: %s", id);
+ TEST_ASSERT(!strncmp(id, "kvm", 3) && strlen(id) < header.name_size,
+ "Invalid KVM stats type, id: %s", id);
/* Sanity check for other fields in header */
- if (header->num_desc == 0) {
+ if (header.num_desc == 0) {
printf("No KVM stats defined!");
return;
}
- /* Check overlap */
- TEST_ASSERT(header->desc_offset > 0 && header->data_offset > 0
- && header->desc_offset >= sizeof(*header)
- && header->data_offset >= sizeof(*header),
- "Invalid offset fields in header");
- TEST_ASSERT(header->desc_offset > header->data_offset ||
- (header->desc_offset + size_desc * header->num_desc <=
- header->data_offset),
- "Descriptor block is overlapped with data block");
-
- /* Allocate memory for stats descriptors */
- stats_desc = calloc(header->num_desc, size_desc);
- TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");
+ /*
+ * The descriptor and data offsets must be valid, they must not overlap
+ * the header, and the descriptor and data blocks must not overlap each
+ * other. Note, the data block is rechecked after its size is known.
+ */
+ TEST_ASSERT(header.desc_offset && header.desc_offset >= sizeof(header) &&
+ header.data_offset && header.data_offset >= sizeof(header),
+ "Invalid offset fields in header");
+
+ TEST_ASSERT(header.desc_offset > header.data_offset ||
+ (header.desc_offset + size_desc * header.num_desc <= header.data_offset),
+ "Descriptor block is overlapped with data block");
+
/* Read kvm stats descriptors */
- ret = pread(stats_fd, stats_desc,
- size_desc * header->num_desc, header->desc_offset);
- TEST_ASSERT(ret == size_desc * header->num_desc,
- "Read KVM stats descriptors");
+ stats_desc = read_stats_descriptors(stats_fd, &header);
/* Sanity check for fields in descriptors */
- for (i = 0; i < header->num_desc; ++i) {
- pdesc = (void *)stats_desc + i * size_desc;
+ for (i = 0; i < header.num_desc; ++i) {
+ pdesc = get_stats_descriptor(stats_desc, i, &header);
+ type = pdesc->flags & KVM_STATS_TYPE_MASK;
+ unit = pdesc->flags & KVM_STATS_UNIT_MASK;
+ base = pdesc->flags & KVM_STATS_BASE_MASK;
+
+ /* Check name string */
+ TEST_ASSERT(strlen(pdesc->name) < header.name_size,
+ "KVM stats name (index: %d) too long", i);
+
/* Check type,unit,base boundaries */
- TEST_ASSERT((pdesc->flags & KVM_STATS_TYPE_MASK)
- <= KVM_STATS_TYPE_MAX, "Unknown KVM stats type");
- TEST_ASSERT((pdesc->flags & KVM_STATS_UNIT_MASK)
- <= KVM_STATS_UNIT_MAX, "Unknown KVM stats unit");
- TEST_ASSERT((pdesc->flags & KVM_STATS_BASE_MASK)
- <= KVM_STATS_BASE_MAX, "Unknown KVM stats base");
- /* Check exponent for stats unit
+ TEST_ASSERT(type <= KVM_STATS_TYPE_MAX,
+ "Unknown KVM stats (%s) type: %u", pdesc->name, type);
+ TEST_ASSERT(unit <= KVM_STATS_UNIT_MAX,
+ "Unknown KVM stats (%s) unit: %u", pdesc->name, unit);
+ TEST_ASSERT(base <= KVM_STATS_BASE_MAX,
+ "Unknown KVM stats (%s) base: %u", pdesc->name, base);
+
+ /*
+ * Check exponent for stats unit
* Exponent for counter should be greater than or equal to 0
* Exponent for unit bytes should be greater than or equal to 0
* Exponent for unit seconds should be less than or equal to 0
* Exponent for unit clock cycles should be greater than or
* equal to 0
+ * Exponent for unit boolean should be 0
*/
switch (pdesc->flags & KVM_STATS_UNIT_MASK) {
case KVM_STATS_UNIT_NONE:
case KVM_STATS_UNIT_BYTES:
case KVM_STATS_UNIT_CYCLES:
TEST_ASSERT(pdesc->exponent >= 0,
- "Unsupported KVM stats unit");
+ "Unsupported KVM stats (%s) exponent: %i",
+ pdesc->name, pdesc->exponent);
break;
case KVM_STATS_UNIT_SECONDS:
TEST_ASSERT(pdesc->exponent <= 0,
- "Unsupported KVM stats unit");
+ "Unsupported KVM stats (%s) exponent: %i",
+ pdesc->name, pdesc->exponent);
+ break;
+ case KVM_STATS_UNIT_BOOLEAN:
+ TEST_ASSERT(pdesc->exponent == 0,
+ "Unsupported KVM stats (%s) exponent: %d",
+ pdesc->name, pdesc->exponent);
break;
}
- /* Check name string */
- TEST_ASSERT(strlen(pdesc->name) < header->name_size,
- "KVM stats name(%s) too long", pdesc->name);
+
/* Check size field, which should not be zero */
- TEST_ASSERT(pdesc->size, "KVM descriptor(%s) with size of 0",
- pdesc->name);
+ TEST_ASSERT(pdesc->size,
+ "KVM descriptor(%s) with size of 0", pdesc->name);
/* Check bucket_size field */
switch (pdesc->flags & KVM_STATS_TYPE_MASK) {
case KVM_STATS_TYPE_LINEAR_HIST:
TEST_ASSERT(pdesc->bucket_size,
- "Bucket size of Linear Histogram stats (%s) is zero",
- pdesc->name);
+ "Bucket size of Linear Histogram stats (%s) is zero",
+ pdesc->name);
break;
default:
TEST_ASSERT(!pdesc->bucket_size,
- "Bucket size of stats (%s) is not zero",
- pdesc->name);
+ "Bucket size of stats (%s) is not zero",
+ pdesc->name);
}
size_data += pdesc->size * sizeof(*stats_data);
}
- /* Check overlap */
- TEST_ASSERT(header->data_offset >= header->desc_offset
- || header->data_offset + size_data <= header->desc_offset,
- "Data block is overlapped with Descriptor block");
+
+ /*
+ * Now that the size of the data block is known, verify the data block
+ * doesn't overlap the descriptor block.
+ */
+ TEST_ASSERT(header.data_offset >= header.desc_offset ||
+ header.data_offset + size_data <= header.desc_offset,
+ "Data block is overlapped with Descriptor block");
+
/* Check validity of all stats data size */
- TEST_ASSERT(size_data >= header->num_desc * sizeof(*stats_data),
- "Data size is not correct");
+ TEST_ASSERT(size_data >= header.num_desc * sizeof(*stats_data),
+ "Data size is not correct");
+
/* Check stats offset */
- for (i = 0; i < header->num_desc; ++i) {
- pdesc = (void *)stats_desc + i * size_desc;
+ for (i = 0; i < header.num_desc; ++i) {
+ pdesc = get_stats_descriptor(stats_desc, i, &header);
TEST_ASSERT(pdesc->offset < size_data,
- "Invalid offset (%u) for stats: %s",
- pdesc->offset, pdesc->name);
+ "Invalid offset (%u) for stats: %s",
+ pdesc->offset, pdesc->name);
}
/* Allocate memory for stats data */
stats_data = malloc(size_data);
TEST_ASSERT(stats_data, "Allocate memory for stats data");
/* Read kvm stats data as a bulk */
- ret = pread(stats_fd, stats_data, size_data, header->data_offset);
+ ret = pread(stats_fd, stats_data, size_data, header.data_offset);
TEST_ASSERT(ret == size_data, "Read KVM stats data");
/* Read kvm stats data one by one */
- size_data = 0;
- for (i = 0; i < header->num_desc; ++i) {
- pdesc = (void *)stats_desc + i * size_desc;
- ret = pread(stats_fd, stats_data,
- pdesc->size * sizeof(*stats_data),
- header->data_offset + size_data);
- TEST_ASSERT(ret == pdesc->size * sizeof(*stats_data),
- "Read data of KVM stats: %s", pdesc->name);
- size_data += pdesc->size * sizeof(*stats_data);
+ for (i = 0; i < header.num_desc; ++i) {
+ pdesc = get_stats_descriptor(stats_desc, i, &header);
+ read_stat_data(stats_fd, &header, pdesc, stats_data,
+ pdesc->size);
}
free(stats_data);
free(stats_desc);
free(id);
- free(header);
}
static void vm_stats_test(struct kvm_vm *vm)
{
- int stats_fd;
-
- /* Get fd for VM stats */
- stats_fd = vm_get_stats_fd(vm);
- TEST_ASSERT(stats_fd >= 0, "Get VM stats fd");
+ int stats_fd = vm_get_stats_fd(vm);
stats_test(stats_fd);
close(stats_fd);
TEST_ASSERT(fcntl(stats_fd, F_GETFD) == -1, "Stats fd not freed");
}
-static void vcpu_stats_test(struct kvm_vm *vm, int vcpu_id)
+static void vcpu_stats_test(struct kvm_vcpu *vcpu)
{
- int stats_fd;
-
- /* Get fd for VCPU stats */
- stats_fd = vcpu_get_stats_fd(vm, vcpu_id);
- TEST_ASSERT(stats_fd >= 0, "Get VCPU stats fd");
+ int stats_fd = vcpu_get_stats_fd(vcpu);
stats_test(stats_fd);
close(stats_fd);
@@ -203,6 +207,7 @@ static void vcpu_stats_test(struct kvm_vm *vm, int vcpu_id)
int main(int argc, char *argv[])
{
int i, j;
+ struct kvm_vcpu **vcpus;
struct kvm_vm **vms;
int max_vm = DEFAULT_NUM_VM;
int max_vcpu = DEFAULT_NUM_VCPU;
@@ -220,26 +225,26 @@ int main(int argc, char *argv[])
}
/* Check the extension for binary stats */
- if (kvm_check_cap(KVM_CAP_BINARY_STATS_FD) <= 0) {
- print_skip("Binary form statistics interface is not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_BINARY_STATS_FD));
/* Create VMs and VCPUs */
vms = malloc(sizeof(vms[0]) * max_vm);
TEST_ASSERT(vms, "Allocate memory for storing VM pointers");
+
+ vcpus = malloc(sizeof(struct kvm_vcpu *) * max_vm * max_vcpu);
+ TEST_ASSERT(vcpus, "Allocate memory for storing vCPU pointers");
+
for (i = 0; i < max_vm; ++i) {
- vms[i] = vm_create(VM_MODE_DEFAULT,
- DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ vms[i] = vm_create_barebones();
for (j = 0; j < max_vcpu; ++j)
- vm_vcpu_add(vms[i], j);
+ vcpus[i * max_vcpu + j] = __vm_vcpu_add(vms[i], j);
}
/* Check stats read for every VM and VCPU */
for (i = 0; i < max_vm; ++i) {
vm_stats_test(vms[i]);
for (j = 0; j < max_vcpu; ++j)
- vcpu_stats_test(vms[i], j);
+ vcpu_stats_test(vcpus[i * max_vcpu + j]);
}
for (i = 0; i < max_vm; ++i)
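The converted test leans on the new lib helpers; their intended call pattern is roughly the following sketch (stats_fd handling and buffer sizing are illustrative):

	struct kvm_stats_header header;
	struct kvm_stats_desc *desc, *pdesc;
	uint64_t *data;
	int i;

	read_stats_header(stats_fd, &header);
	desc = read_stats_descriptors(stats_fd, &header);
	for (i = 0; i < header.num_desc; ++i) {
		pdesc = get_stats_descriptor(desc, i, &header);
		data = malloc(pdesc->size * sizeof(*data));
		read_stat_data(stats_fd, &header, pdesc, data, pdesc->size);
		free(data);
	}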
diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
index aed9dc3ca1e9..31b3cb24b9a7 100644
--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
@@ -28,11 +28,11 @@ void test_vcpu_creation(int first_vcpu_id, int num_vcpus)
pr_info("Testing creating %d vCPUs, with IDs %d...%d.\n",
num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ vm = vm_create_barebones();
for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
/* This asserts that the vCPU was created. */
- vm_vcpu_add(vm, i);
+ __vm_vcpu_add(vm, i);
kvm_vm_free(vm);
}
@@ -64,11 +64,9 @@ int main(int argc, char *argv[])
rl.rlim_max = nr_fds_wanted;
int r = setrlimit(RLIMIT_NOFILE, &rl);
- if (r < 0) {
- printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+ __TEST_REQUIRE(r >= 0,
+		       "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
old_rlim_max, nr_fds_wanted);
- exit(KSFT_SKIP);
- }
} else {
TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
}
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 2c4a7563a4f8..f42c6ac6d71d 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -46,11 +46,6 @@ static const char * const test_stage_string[] = {
"KVM_ADJUST_MAPPINGS",
};
-struct vcpu_args {
- int vcpu_id;
- bool vcpu_write;
-};
-
struct test_args {
struct kvm_vm *vm;
uint64_t guest_test_virt_mem;
@@ -60,7 +55,7 @@ struct test_args {
uint64_t large_num_pages;
uint64_t host_pages_per_lpage;
enum vm_mem_backing_src_type src_type;
- struct vcpu_args vcpu_args[KVM_MAX_VCPUS];
+ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
};
/*
@@ -92,17 +87,13 @@ static uint64_t guest_test_phys_mem;
*/
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-static void guest_code(int vcpu_id)
+static void guest_code(bool do_write)
{
struct test_args *p = &test_args;
- struct vcpu_args *vcpu_args = &p->vcpu_args[vcpu_id];
enum test_stage *current_stage = &guest_test_stage;
uint64_t addr;
int i, j;
- /* Make sure vCPU args data structure is not corrupt */
- GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
while (true) {
addr = p->guest_test_virt_mem;
@@ -123,7 +114,7 @@ static void guest_code(int vcpu_id)
*/
case KVM_CREATE_MAPPINGS:
for (i = 0; i < p->large_num_pages; i++) {
- if (vcpu_args->vcpu_write)
+ if (do_write)
*(uint64_t *)addr = 0x0123456789ABCDEF;
else
READ_ONCE(*(uint64_t *)addr);
@@ -193,17 +184,14 @@ static void guest_code(int vcpu_id)
static void *vcpu_worker(void *data)
{
- int ret;
- struct vcpu_args *vcpu_args = data;
- struct kvm_vm *vm = test_args.vm;
- int vcpu_id = vcpu_args->vcpu_id;
- struct kvm_run *run;
+ struct kvm_vcpu *vcpu = data;
+ bool do_write = !(vcpu->id % 2);
struct timespec start;
struct timespec ts_diff;
enum test_stage stage;
+ int ret;
- vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
- run = vcpu_state(vm, vcpu_id);
+ vcpu_args_set(vcpu, 1, do_write);
while (!READ_ONCE(host_quit)) {
ret = sem_wait(&test_stage_updated);
@@ -213,15 +201,15 @@ static void *vcpu_worker(void *data)
return NULL;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
- ret = _vcpu_run(vm, vcpu_id);
+ ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC,
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s\n",
- exit_reason_str(run->exit_reason));
+ exit_reason_str(vcpu->run->exit_reason));
- pr_debug("Got sync event from vCPU %d\n", vcpu_id);
+ pr_debug("Got sync event from vCPU %d\n", vcpu->id);
stage = READ_ONCE(*current_stage);
/*
@@ -230,7 +218,7 @@ static void *vcpu_worker(void *data)
*/
pr_debug("vCPU %d has completed stage %s\n"
"execution time is: %ld.%.9lds\n\n",
- vcpu_id, test_stage_string[stage],
+ vcpu->id, test_stage_string[stage],
ts_diff.tv_sec, ts_diff.tv_nsec);
ret = sem_post(&test_stage_completed);
@@ -250,7 +238,6 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
{
int ret;
struct test_params *p = arg;
- struct vcpu_args *vcpu_args;
enum vm_mem_backing_src_type src_type = p->src_type;
uint64_t large_page_size = get_backing_src_pagesz(src_type);
uint64_t guest_page_size = vm_guest_mode_params[mode].page_size;
@@ -260,7 +247,6 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
uint64_t alignment;
void *host_test_mem;
struct kvm_vm *vm;
- int vcpu_id;
/* Align up the test memory size */
alignment = max(large_page_size, guest_page_size);
@@ -268,12 +254,12 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
/* Create a VM with enough guest pages */
guest_num_pages = test_mem_size / guest_page_size;
- vm = vm_create_with_vcpus(mode, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
- guest_num_pages, 0, guest_code, NULL);
+ vm = __vm_create_with_vcpus(mode, nr_vcpus, guest_num_pages,
+ guest_code, test_args.vcpus);
/* Align down GPA of the testing memslot */
if (!p->phys_offset)
- guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+ guest_test_phys_mem = (vm->max_gfn - guest_num_pages) *
guest_page_size;
else
guest_test_phys_mem = p->phys_offset;
@@ -292,12 +278,6 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
test_args.host_pages_per_lpage = large_page_size / host_page_size;
test_args.src_type = src_type;
- for (vcpu_id = 0; vcpu_id < KVM_MAX_VCPUS; vcpu_id++) {
- vcpu_args = &test_args.vcpu_args[vcpu_id];
- vcpu_args->vcpu_id = vcpu_id;
- vcpu_args->vcpu_write = !(vcpu_id % 2);
- }
-
/* Add an extra memory slot with specified backing src type */
vm_userspace_mem_region_add(vm, src_type, guest_test_phys_mem,
TEST_MEM_SLOT_INDEX, guest_num_pages, 0);
@@ -363,12 +343,11 @@ static void vcpus_complete_new_stage(enum test_stage stage)
static void run_test(enum vm_guest_mode mode, void *arg)
{
- int ret;
pthread_t *vcpu_threads;
struct kvm_vm *vm;
- int vcpu_id;
struct timespec start;
struct timespec ts_diff;
+ int ret, i;
/* Create VM with vCPUs and make some pre-initialization */
vm = pre_init_before_test(mode, arg);
@@ -379,10 +358,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
host_quit = false;
*current_stage = KVM_BEFORE_MAPPINGS;
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
- pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker,
- &test_args.vcpu_args[vcpu_id]);
- }
+ for (i = 0; i < nr_vcpus; i++)
+ pthread_create(&vcpu_threads[i], NULL, vcpu_worker,
+ test_args.vcpus[i]);
vcpus_complete_new_stage(*current_stage);
pr_info("Started all vCPUs successfully\n");
@@ -424,13 +402,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Tell the vcpu thread to quit */
host_quit = true;
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ for (i = 0; i < nr_vcpus; i++) {
ret = sem_post(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_post");
}
- for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++)
- pthread_join(vcpu_threads[vcpu_id], NULL);
+ for (i = 0; i < nr_vcpus; i++)
+ pthread_join(vcpu_threads[i], NULL);
ret = sem_destroy(&test_stage_updated);
TEST_ASSERT(ret == 0, "Error in sem_destroy");
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 9343d82519b4..6f5551368944 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -10,7 +10,6 @@
#include "guest_modes.h"
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#include "processor.h"
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
@@ -75,7 +74,7 @@ static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
return 1 << (vm->page_shift - 3);
}
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
if (!vm->pgd_created) {
vm_paddr_t paddr = vm_phy_pages_alloc(vm,
@@ -132,14 +131,14 @@ static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
_virt_pg_map(vm, vaddr, paddr, attr_idx);
}
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t *ptep;
@@ -196,7 +195,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t p
#endif
}
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
int level = 4 - (vm->pgtable_levels - 1);
uint64_t pgd, *ptep;
@@ -213,9 +212,10 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
}
-void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init)
+void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
+ struct kvm_vm *vm = vcpu->vm;
uint64_t sctlr_el1, tcr_el1;
if (!init)
@@ -227,16 +227,16 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
init->target = preferred.target;
}
- vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);
+ vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);
/*
* Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
* registers, which the variable argument list macros do.
*/
- set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
- get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
- get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
+ vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
/* Configure base granule size */
switch (vm->mode) {
@@ -297,46 +297,49 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
- set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
- set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
- set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
- set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
- set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
uint64_t pstate, pc;
- get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
- get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
+ vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
indent, "", pstate, pc);
}
-void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_init *init, void *guest_code)
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ struct kvm_vcpu_init *init, void *guest_code)
{
size_t stack_size = vm->page_size == 4096 ?
DEFAULT_STACK_PGS * vm->page_size :
vm->page_size;
uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
+ struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
- vm_vcpu_add(vm, vcpuid);
- aarch64_vcpu_setup(vm, vcpuid, init);
+ aarch64_vcpu_setup(vcpu, init);
- set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
- set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+
+ return vcpu;
}
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ void *guest_code)
{
- aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
+ return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
}
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
int i;
@@ -347,8 +350,8 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
va_start(ap, num);
for (i = 0; i < num; i++) {
- set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
- va_arg(ap, uint64_t));
+ vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
+ va_arg(ap, uint64_t));
}
va_end(ap);
@@ -361,11 +364,11 @@ void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
;
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- if (get_ucall(vm, vcpuid, &uc) != UCALL_UNHANDLED)
+ if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
return;
if (uc.args[2]) /* valid_ec */ {
@@ -383,11 +386,11 @@ struct handlers {
handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM];
};
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
extern char vectors;
- set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}
void route_exception(struct ex_regs *regs, int vector)
@@ -469,19 +472,19 @@ void aarch64_get_supported_page_sizes(uint32_t ipa,
};
kvm_fd = open_kvm_dev_path_or_exit();
- vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, ipa);
- TEST_ASSERT(vm_fd >= 0, "Can't create VM");
+ vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
+ TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));
vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
- TEST_ASSERT(vcpu_fd >= 0, "Can't create vcpu");
+ TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));
err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
- TEST_ASSERT(err == 0, "Can't get target");
+ TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
- TEST_ASSERT(err == 0, "Can't get init vcpu");
+ TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));
err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
- TEST_ASSERT(err == 0, "Can't get MMFR0");
+	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, err));
*ps4k = ((val >> 28) & 0xf) != 0xf;
*ps64k = ((val >> 24) & 0xf) == 0;
@@ -500,3 +503,28 @@ void __attribute__((constructor)) init_guest_modes(void)
{
guest_modes_append_default();
}
+
+void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
+ uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
+ uint64_t arg6, struct arm_smccc_res *res)
+{
+ asm volatile("mov w0, %w[function_id]\n"
+ "mov x1, %[arg0]\n"
+ "mov x2, %[arg1]\n"
+ "mov x3, %[arg2]\n"
+ "mov x4, %[arg3]\n"
+ "mov x5, %[arg4]\n"
+ "mov x6, %[arg5]\n"
+ "mov x7, %[arg6]\n"
+ "hvc #0\n"
+ "mov %[res0], x0\n"
+ "mov %[res1], x1\n"
+ "mov %[res2], x2\n"
+ "mov %[res3], x3\n"
+ : [res0] "=r"(res->a0), [res1] "=r"(res->a1),
+ [res2] "=r"(res->a2), [res3] "=r"(res->a3)
+ : [function_id] "r"(function_id), [arg0] "r"(arg0),
+ [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),
+ [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)
+ : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7");
+}
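A guest-side usage sketch, assuming ARM_SMCCC_VERSION_FUNC_ID and SMCCC_RET_NOT_SUPPORTED from the arm-smccc.h header imported elsewhere in this series:

	struct arm_smccc_res res;

	/* SMCCC_VERSION takes no arguments; results land in res.a0-a3. */
	smccc_hvc(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0, 0, 0, 0, 0, &res);
	GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED);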
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index e0b0164e9af8..ed237b744690 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -5,7 +5,6 @@
* Copyright (C) 2018, Red Hat, Inc.
*/
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
static vm_vaddr_t *ucall_exit_mmio_addr;
@@ -52,7 +51,7 @@ void ucall_init(struct kvm_vm *vm, void *arg)
* lower and won't match physical addresses.
*/
bits = vm->va_bits - 1;
- bits = vm->pa_bits < bits ? vm->pa_bits : bits;
+ bits = min(vm->pa_bits, bits);
end = 1ul << bits;
start = end * 5 / 8;
step = end / 16;
@@ -73,25 +72,24 @@ void ucall_uninit(struct kvm_vm *vm)
void ucall(uint64_t cmd, int nargs, ...)
{
- struct ucall uc = {
- .cmd = cmd,
- };
+ struct ucall uc = {};
va_list va;
int i;
- nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+ WRITE_ONCE(uc.cmd, cmd);
+ nargs = min(nargs, UCALL_MAX_ARGS);
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
- uc.args[i] = va_arg(va, uint64_t);
+ WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
va_end(va);
- *ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
+ WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
@@ -104,9 +102,9 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
"Unexpected ucall exit mmio address access");
memcpy(&gva, run->mmio.data, sizeof(gva));
- memcpy(&ucall, addr_gva2hva(vm, gva), sizeof(ucall));
+ memcpy(&ucall, addr_gva2hva(vcpu->vm, gva), sizeof(ucall));
- vcpu_run_complete_io(vm, vcpu_id);
+ vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
index 5d45046c1b80..b5f28d21a947 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
@@ -9,7 +9,6 @@
#include <asm/kvm.h>
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#include "vgic.h"
#include "gic.h"
#include "gic_v3.h"
@@ -52,31 +51,30 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
nr_vcpus, nr_vcpus_created);
/* Distributor setup */
- if (_kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3,
- false, &gic_fd) != 0)
- return -1;
+ gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
+ if (gic_fd < 0)
+ return gic_fd;
- kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
- 0, &nr_irqs, true);
+ kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs);
- kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
- kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true);
+ kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa);
nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
virt_map(vm, gicd_base_gpa, gicd_base_gpa, nr_gic_pages);
/* Redistributor setup */
redist_attr = REDIST_REGION_ATTR_ADDR(nr_vcpus, gicr_base_gpa, 0, 0);
- kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &redist_attr, true);
+ kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &redist_attr);
nr_gic_pages = vm_calc_num_guest_pages(vm->mode,
KVM_VGIC_V3_REDIST_SIZE * nr_vcpus);
virt_map(vm, gicr_base_gpa, gicr_base_gpa, nr_gic_pages);
- kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
- KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+ kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL);
return gic_fd;
}
@@ -89,14 +87,14 @@ int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
uint64_t val;
int ret;
- ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
- attr, &val, false);
+ ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
+ attr, &val);
if (ret != 0)
return ret;
val |= 1U << index;
- ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
- attr, &val, true);
+ ret = __kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
+ attr, &val);
return ret;
}
@@ -104,8 +102,7 @@ void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
- TEST_ASSERT(ret == 0, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO failed, "
- "rc: %i errno: %i", ret, errno);
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret));
}
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
@@ -127,12 +124,11 @@ void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);
- TEST_ASSERT(ret == 0, "KVM_IRQ_LINE failed, rc: %i errno: %i",
- ret, errno);
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
-static void vgic_poke_irq(int gic_fd, uint32_t intid,
- uint32_t vcpu, uint64_t reg_off)
+static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu,
+ uint64_t reg_off)
{
uint64_t reg = intid / 32;
uint64_t index = intid % 32;
@@ -145,7 +141,7 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid,
if (intid_is_private) {
/* TODO: only vcpu 0 implemented for now. */
- assert(vcpu == 0);
+ assert(vcpu->id == 0);
attr += SZ_64K;
}
@@ -158,17 +154,17 @@ static void vgic_poke_irq(int gic_fd, uint32_t intid,
* intid will just make the read/writes point to above the intended
* register space (i.e., ICPENDR after ISPENDR).
*/
- kvm_device_access(gic_fd, group, attr, &val, false);
+ kvm_device_attr_get(gic_fd, group, attr, &val);
val |= 1ULL << index;
- kvm_device_access(gic_fd, group, attr, &val, true);
+ kvm_device_attr_set(gic_fd, group, attr, &val);
}
-void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu)
+void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
-void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu)
+void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
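
The conversion above replaces two awkward idioms: _kvm_create_device() returned the fd through an out-parameter and overloaded a bool for "test only", and kvm_device_access() folded get and set into one call switched by a trailing bool. With the new helpers the direction lives in the name. A short sketch of the calling convention, given a struct kvm_vm *vm and uint32_t nr_irqs (helper declarations assumed from elsewhere in this series):

/* A negative return is -errno, e.g. when the host has no GICv3. */
int gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);

TEST_ASSERT(gic_fd >= 0, "GICv3 device creation failed: %d", gic_fd);

/* Set, then read back, the IRQ count -- no 'write' bool to get wrong. */
kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs);
kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs);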
diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c
index 13e8e3dcf984..9f54c098d9d0 100644
--- a/tools/testing/selftests/kvm/lib/elf.c
+++ b/tools/testing/selftests/kvm/lib/elf.c
@@ -11,7 +11,6 @@
#include <linux/elf.h>
#include "kvm_util.h"
-#include "kvm_util_internal.h"
static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp)
{
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
index 8784013b747c..99a575bbbc52 100644
--- a/tools/testing/selftests/kvm/lib/guest_modes.c
+++ b/tools/testing/selftests/kvm/lib/guest_modes.c
@@ -65,9 +65,9 @@ void guest_modes_append_default(void)
struct kvm_s390_vm_cpu_processor info;
kvm_fd = open_kvm_dev_path_or_exit();
- vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
- kvm_device_access(vm_fd, KVM_S390_VM_CPU_MODEL,
- KVM_S390_VM_CPU_PROCESSOR, &info, false);
+ vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, NULL);
+ kvm_device_attr_get(vm_fd, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR, &info);
close(vm_fd);
close(kvm_fd);
/* Starting with z13 we have 47bits of physical address */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1665a220abcb..9889fe0d8919 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -8,7 +8,6 @@
#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
-#include "kvm_util_internal.h"
#include "processor.h"
#include <assert.h>
@@ -27,10 +26,7 @@ int open_path_or_exit(const char *path, int flags)
int fd;
fd = open(path, flags);
- if (fd < 0) {
- print_skip("%s not available (errno: %d)", path, errno);
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
return fd;
}
@@ -70,121 +66,34 @@ int open_kvm_dev_path_or_exit(void)
* Looks up and returns the value corresponding to the capability
* (KVM_CAP_*) given by cap.
*/
-int kvm_check_cap(long cap)
+unsigned int kvm_check_cap(long cap)
{
int ret;
int kvm_fd;
kvm_fd = open_kvm_dev_path_or_exit();
- ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
- TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
- " rc: %i errno: %i", ret, errno);
+ ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
+ TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
close(kvm_fd);
- return ret;
-}
-
-/* VM Check Capability
- *
- * Input Args:
- * vm - Virtual Machine
- * cap - Capability
- *
- * Output Args: None
- *
- * Return:
- * On success, the Value corresponding to the capability (KVM_CAP_*)
- * specified by the value of cap. On failure a TEST_ASSERT failure
- * is produced.
- *
- * Looks up and returns the value corresponding to the capability
- * (KVM_CAP_*) given by cap.
- */
-int vm_check_cap(struct kvm_vm *vm, long cap)
-{
- int ret;
-
- ret = ioctl(vm->fd, KVM_CHECK_EXTENSION, cap);
- TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION VM IOCTL failed,\n"
- " rc: %i errno: %i", ret, errno);
-
- return ret;
-}
-
-/* VM Enable Capability
- *
- * Input Args:
- * vm - Virtual Machine
- * cap - Capability
- *
- * Output Args: None
- *
- * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
- *
- * Enables a capability (KVM_CAP_*) on the VM.
- */
-int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
-{
- int ret;
-
- ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
- TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
- " rc: %i errno: %i", ret, errno);
-
- return ret;
-}
-
-/* VCPU Enable Capability
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpu_id - VCPU
- * cap - Capability
- *
- * Output Args: None
- *
- * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
- *
- * Enables a capability (KVM_CAP_*) on the VCPU.
- */
-int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
- struct kvm_enable_cap *cap)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
- int r;
-
- TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);
-
- r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
- TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
- " rc: %i, errno: %i", r, errno);
-
- return r;
+ return (unsigned int)ret;
}
void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
- struct kvm_enable_cap cap = { 0 };
-
- cap.cap = KVM_CAP_DIRTY_LOG_RING;
- cap.args[0] = ring_size;
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
vm->dirty_ring_size = ring_size;
}
-static void vm_open(struct kvm_vm *vm, int perm)
+static void vm_open(struct kvm_vm *vm)
{
- vm->kvm_fd = _open_kvm_dev_path_or_exit(perm);
+ vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
- if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
- print_skip("immediate_exit not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
- vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
- TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
- "rc: %i errno: %i", vm->fd, errno);
+ vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
+ TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
}
const char *vm_guest_mode_string(uint32_t i)
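
This hunk calls kvm_has_cap() and the three-argument vm_enable_cap() without defining them; presumably both are now small wrappers in the shared header. A sketch under that assumption -- the exact bodies and their location are inferred, not shown in this patch:

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);	/* 0 == capability not supported */
}

static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);	/* asserts on failure */
}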
@@ -234,31 +143,12 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
-/*
- * VM Create
- *
- * Input Args:
- * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
- * phy_pages - Physical memory pages
- * perm - permission
- *
- * Output Args: None
- *
- * Return:
- * Pointer to opaque structure that describes the created VM.
- *
- * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
- * When phy_pages is non-zero, a memory region of phy_pages physical pages
- * is created and mapped starting at guest physical address 0. The file
- * descriptor to control the created VM is created with the permissions
- * given by perm (e.g. O_RDWR).
- */
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
{
struct kvm_vm *vm;
- pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
- vm_guest_mode_string(mode), phy_pages, perm);
+ pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
+ vm_guest_mode_string(mode), nr_pages);
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficient Memory");
@@ -340,7 +230,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif
- vm_open(vm, perm);
+ vm_open(vm);
/* Limit to VA-bit canonical virtual addresses. */
vm->vpages_valid = sparsebit_alloc();
@@ -355,18 +245,56 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
/* Allocate and setup memory for guest. */
vm->vpages_mapped = sparsebit_alloc();
- if (phy_pages != 0)
+ if (nr_pages != 0)
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- 0, 0, phy_pages, 0);
+ 0, 0, nr_pages, 0);
return vm;
}
-struct kvm_vm *vm_create_without_vcpus(enum vm_guest_mode mode, uint64_t pages)
+static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
+ uint32_t nr_runnable_vcpus,
+ uint64_t extra_mem_pages)
+{
+ uint64_t nr_pages;
+
+ TEST_ASSERT(nr_runnable_vcpus,
+ "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
+
+ TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
+ "nr_vcpus = %d too large for host, max-vcpus = %d",
+ nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
+
+ /*
+ * Arbitrarily allocate 512 pages (2MB when the page size is 4KB) for the
+ * test code and other per-VM assets that will be loaded into memslot0.
+ */
+ nr_pages = 512;
+
+ /* Account for the per-vCPU stacks on behalf of the test. */
+ nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;
+
+ /*
+ * Account for the number of pages needed for the page tables. The
+ * maximum page table size for a memory region will be when the
+ * smallest page size is used. Considering each page contains x page
+ * table descriptors, the total extra size for page tables (for extra
+ * N pages) will be: N/x + N/x^2 + N/x^3 + ..., a geometric series that
+ * sums to less than 2N/x.
+ */
+ nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
+
+ return vm_adjust_num_guest_pages(mode, nr_pages);
+}
+
+struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
+ uint64_t nr_extra_pages)
{
+ uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
+ nr_extra_pages);
struct kvm_vm *vm;
- vm = vm_create(mode, pages, O_RDWR);
+ vm = ____vm_create(mode, nr_pages);
kvm_vm_elf_load(vm, program_invocation_name);
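
To make vm_nr_pages_required() concrete, here is a worked instance for the common single-vCPU case. The constants are assumptions not visible in this diff (DEFAULT_STACK_PGS == 5, and PTES_PER_MIN_PAGE == 512 for 4KiB pages holding 8-byte descriptors):

static uint64_t example_nr_pages(void)
{
	uint64_t nr_pages = 512;		/* memslot0 baseline	*/

	nr_pages += 1 * 5;			/* one vCPU stack: 517	*/
	nr_pages += (517 + 0) / 512 * 2;	/* page-table slop: +2	*/

	return nr_pages;	/* 519, then rounded up for the VM mode */
}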
@@ -382,9 +310,7 @@ struct kvm_vm *vm_create_without_vcpus(enum vm_guest_mode mode, uint64_t pages)
* Input Args:
* mode - VM Mode (e.g. VM_MODE_P52V48_4K)
* nr_vcpus - VCPU count
- * slot0_mem_pages - Slot0 physical memory size
* extra_mem_pages - Non-slot0 physical memory total size
- * num_percpu_pages - Per-cpu physical memory pages
* guest_code - Guest entry point
* vcpuids - VCPU IDs
*
@@ -393,64 +319,39 @@ struct kvm_vm *vm_create_without_vcpus(enum vm_guest_mode mode, uint64_t pages)
* Return:
* Pointer to opaque structure that describes the created VM.
*
- * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
- * with customized slot0 memory size, at least 512 pages currently.
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
* extra_mem_pages is used only to calculate the maximum page table size;
* no memory is actually allocated for non-slot0 memory in this function.
*/
-struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
- uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
- uint32_t num_percpu_pages, void *guest_code,
- uint32_t vcpuids[])
+struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
+ uint64_t extra_mem_pages,
+ void *guest_code, struct kvm_vcpu *vcpus[])
{
- uint64_t vcpu_pages, extra_pg_pages, pages;
struct kvm_vm *vm;
int i;
- /* Force slot0 memory size not small than DEFAULT_GUEST_PHY_PAGES */
- if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
- slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
-
- /* The maximum page table size for a memory region will be when the
- * smallest pages are used. Considering each page contains x page
- * table descriptors, the total extra size for page tables (for extra
- * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
- * than N/x*2.
- */
- vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
- extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
- pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
-
- TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
- "nr_vcpus = %d too large for host, max-vcpus = %d",
- nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
-
- pages = vm_adjust_num_guest_pages(mode, pages);
-
- vm = vm_create_without_vcpus(mode, pages);
+ TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
- for (i = 0; i < nr_vcpus; ++i) {
- uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
+ vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
- vm_vcpu_add_default(vm, vcpuid, guest_code);
- }
+ for (i = 0; i < nr_vcpus; ++i)
+ vcpus[i] = vm_vcpu_add(vm, i, guest_code);
return vm;
}
-struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
- uint32_t num_percpu_pages, void *guest_code,
- uint32_t vcpuids[])
+struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
+ uint64_t extra_mem_pages,
+ void *guest_code)
{
- return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
- extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
-}
+ struct kvm_vcpu *vcpus[1];
+ struct kvm_vm *vm;
-struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
- void *guest_code)
-{
- return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
- (uint32_t []){ vcpuid });
+ vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
+ guest_code, vcpus);
+
+ *vcpu = vcpus[0];
+ return vm;
}
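
Most tests want exactly one vCPU, so the typical call site under the new API reduces to the following (guest_code is the test's guest entry point; zero extra pages is the common case):

struct kvm_vcpu *vcpu;
struct kvm_vm *vm;

vm = __vm_create_with_one_vcpu(&vcpu, 0, guest_code);

/* ... vcpu_run(vcpu), get_ucall(vcpu, &uc), etc. ... */

kvm_vm_free(vm);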
/*
@@ -458,7 +359,6 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
*
* Input Args:
* vm - VM that has been released before
- * perm - permission
*
* Output Args: None
*
@@ -466,12 +366,12 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
* global state, such as the irqchip and the memory regions that are mapped
* into the guest.
*/
-void kvm_vm_restart(struct kvm_vm *vmp, int perm)
+void kvm_vm_restart(struct kvm_vm *vmp)
{
int ctr;
struct userspace_mem_region *region;
- vm_open(vmp, perm);
+ vm_open(vmp);
if (vmp->has_irqchip)
vm_create_irqchip(vmp);
@@ -488,34 +388,17 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
}
}
-void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
+__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
+ uint32_t vcpu_id)
{
- struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
- int ret;
-
- ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
- TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
- __func__, strerror(-ret));
+ return __vm_vcpu_add(vm, vcpu_id);
}
-void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
- uint64_t first_page, uint32_t num_pages)
+struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
{
- struct kvm_clear_dirty_log args = {
- .dirty_bitmap = log, .slot = slot,
- .first_page = first_page,
- .num_pages = num_pages
- };
- int ret;
-
- ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
- TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
- __func__, strerror(-ret));
-}
+ kvm_vm_restart(vm);
-uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
-{
- return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
+ return vm_vcpu_recreate(vm, 0);
}
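
kvm_vm_get_dirty_log() and kvm_vm_clear_dirty_log() vanish from this file with no replacement in sight; presumably they live on as one-line header wrappers around vm_ioctl(), which already asserts on failure. That would also fix a latent bug in the deleted code: ioctl() reports errors via errno, so strerror(-ret) with ret == -1 always printed "Operation not permitted". A sketch under that assumption:

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);	/* asserts on failure */
}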
/*
@@ -589,32 +472,9 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
return &region->region;
}
-/*
- * VCPU Find
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return:
- * Pointer to VCPU structure
- *
- * Locates a vcpu structure that describes the VCPU specified by vcpuid and
- * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
- * for the specified vcpuid.
- */
-struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
+__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
{
- struct vcpu *vcpu;
-
- list_for_each_entry(vcpu, &vm->vcpus, list) {
- if (vcpu->id == vcpuid)
- return vcpu;
- }
- return NULL;
}
/*
@@ -629,43 +489,41 @@ struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
*
* Removes a vCPU from a VM and frees its resources.
*/
-static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
+static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
int ret;
if (vcpu->dirty_gfns) {
ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
- TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
- "rc: %i errno: %i", ret, errno);
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
vcpu->dirty_gfns = NULL;
}
- ret = munmap(vcpu->state, vcpu_mmap_sz());
- TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
- "errno: %i", ret, errno);
+ ret = munmap(vcpu->run, vcpu_mmap_sz());
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
+
ret = close(vcpu->fd);
- TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
- "errno: %i", ret, errno);
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
list_del(&vcpu->list);
+
+ vcpu_arch_free(vcpu);
free(vcpu);
}
void kvm_vm_release(struct kvm_vm *vmp)
{
- struct vcpu *vcpu, *tmp;
+ struct kvm_vcpu *vcpu, *tmp;
int ret;
list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
vm_vcpu_rm(vmp, vcpu);
ret = close(vmp->fd);
- TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
- " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
ret = close(vmp->kvm_fd);
- TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
- " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
@@ -681,13 +539,11 @@ static void __vm_mem_region_delete(struct kvm_vm *vm,
}
region->region.memory_size = 0;
- ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
- TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
- "rc: %i errno: %i", ret, errno);
+ vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
sparsebit_free(&region->unused_phy_pages);
ret = munmap(region->mmap_start, region->mmap_size);
- TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
free(region);
}
@@ -704,6 +560,12 @@ void kvm_vm_free(struct kvm_vm *vmp)
if (vmp == NULL)
return;
+ /* Free cached stats metadata and close FD */
+ if (vmp->stats_fd) {
+ free(vmp->stats_desc);
+ close(vmp->stats_fd);
+ }
+
/* Free userspace_mem_regions. */
hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
__vm_mem_region_delete(vmp, region, false);
@@ -727,14 +589,13 @@ int kvm_memfd_alloc(size_t size, bool hugepages)
memfd_flags |= MFD_HUGETLB;
fd = memfd_create("kvm_selftest", memfd_flags);
- TEST_ASSERT(fd != -1, "memfd_create() failed, errno: %i (%s)",
- errno, strerror(errno));
+ TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));
r = ftruncate(fd, size);
- TEST_ASSERT(!r, "ftruncate() failed, errno: %i (%s)", errno, strerror(errno));
+ TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));
r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
- TEST_ASSERT(!r, "fallocate() failed, errno: %i (%s)", errno, strerror(errno));
+ TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
return fd;
}
@@ -1000,8 +861,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
vm_mem_backing_src_alias(src_type)->flag,
region->fd, 0);
TEST_ASSERT(region->mmap_start != MAP_FAILED,
- "test_malloc failed, mmap_start: %p errno: %i",
- region->mmap_start, errno);
+ __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
@@ -1029,7 +889,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
region->region.guest_phys_addr = guest_paddr;
region->region.memory_size = npages * vm->page_size;
region->region.userspace_addr = (uintptr_t) region->host_mem;
- ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
+ ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
@@ -1049,7 +909,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
vm_mem_backing_src_alias(src_type)->flag,
region->fd, 0);
TEST_ASSERT(region->mmap_alias != MAP_FAILED,
- "mmap of alias failed, errno: %i", errno);
+ __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
/* Align host alias address */
region->host_alias = align_ptr_up(region->mmap_alias, alignment);
@@ -1112,7 +972,7 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
region->region.flags = flags;
- ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
+ ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
" rc: %i errno: %i slot: %u flags: 0x%x",
@@ -1142,7 +1002,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
region->region.guest_phys_addr = new_gpa;
- ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
+ ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
"ret: %i errno: %i slot: %u new_gpa: 0x%lx",
@@ -1167,19 +1027,7 @@ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
__vm_mem_region_delete(vm, memslot2region(vm, slot), true);
}
-/*
- * VCPU mmap Size
- *
- * Input Args: None
- *
- * Output Args: None
- *
- * Return:
- * Size of VCPU state
- *
- * Returns the size of the structure pointed to by the return value
- * of vcpu_state().
- */
+/* Returns the size of a vCPU's kvm_run structure. */
static int vcpu_mmap_sz(void)
{
int dev_fd, ret;
@@ -1188,59 +1036,57 @@ static int vcpu_mmap_sz(void)
ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
TEST_ASSERT(ret >= sizeof(struct kvm_run),
- "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
- __func__, ret, errno);
+ KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
close(dev_fd);
return ret;
}
+static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
+{
+ struct kvm_vcpu *vcpu;
+
+ list_for_each_entry(vcpu, &vm->vcpus, list) {
+ if (vcpu->id == vcpu_id)
+ return true;
+ }
+
+ return false;
+}
+
/*
- * VM VCPU Add
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return: None
- *
- * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
- * No additional VCPU setup is done.
+ * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
+ * No additional vCPU setup is done. Returns the vCPU.
*/
-void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
- struct vcpu *vcpu;
+ struct kvm_vcpu *vcpu;
/* Confirm a vcpu with the specified id doesn't already exist. */
- vcpu = vcpu_find(vm, vcpuid);
- if (vcpu != NULL)
- TEST_FAIL("vcpu with the specified id "
- "already exists,\n"
- " requested vcpuid: %u\n"
- " existing vcpuid: %u state: %p",
- vcpuid, vcpu->id, vcpu->state);
+ TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
/* Allocate and initialize new vcpu structure. */
vcpu = calloc(1, sizeof(*vcpu));
TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
- vcpu->id = vcpuid;
- vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
- TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
- vcpu->fd, errno);
- TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
+ vcpu->vm = vm;
+ vcpu->id = vcpu_id;
+ vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
+ TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd));
+
+ TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
- vcpu_mmap_sz(), sizeof(*vcpu->state));
- vcpu->state = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
+ vcpu_mmap_sz(), sizeof(*vcpu->run));
+ vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
- TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
- "vcpu id: %u errno: %i", vcpuid, errno);
+ TEST_ASSERT(vcpu->run != MAP_FAILED,
+ __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
/* Add to linked-list of VCPUs. */
list_add(&vcpu->list, &vm->vcpus);
+
+ return vcpu;
}
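
__vm_vcpu_add() is now the bare "create the fd and mmap kvm_run" step, returning a handle that carries everything the old (vm, vcpu_id) pair had to look up on every call. A trivial illustrative caller:

struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, 0);

pr_debug("vCPU%u: fd=%d run=%p owner=%p\n",
	 vcpu->id, vcpu->fd, vcpu->run, (void *)vcpu->vm);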
/*
@@ -1336,8 +1182,6 @@ va_found:
* vm - Virtual Machine
* sz - Size in bytes
* vaddr_min - Minimum starting virtual address
- * data_memslot - Memory region slot for data pages
- * pgd_memslot - Memory region slot for new virtual translation tables
*
* Output Args: None
*
@@ -1423,7 +1267,6 @@ vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
* vaddr - Virtual address to map
* paddr - VM Physical Address
* npages - The number of pages to map
- * pgd_memslot - Memory region slot for new virtual translation tables
*
* Output Args: None
*
@@ -1534,11 +1377,10 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
* (without failing the test) if the guest memory is not shared (so
* no alias exists).
*
- * When vm_create() and related functions are called with a shared memory
- * src_type, we also create a writable, shared alias mapping of the
- * underlying guest memory. This allows the host to manipulate guest memory
- * without mapping that memory in the guest's address space. And, for
- * userfaultfd-based demand paging, we can do so without triggering userfaults.
+ * Create a writable, shared virtual=>physical alias for the specified GPA.
+ * The primary use case is to allow the host selftest to manipulate guest
+ * memory without mapping said memory in the guest's address space. And, for
+ * userfaultfd-based demand paging, to do so without triggering userfaults.
*/
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
{
@@ -1556,452 +1398,90 @@ void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
return (void *) ((uintptr_t) region->host_alias + offset);
}
-/*
- * VM Create IRQ Chip
- *
- * Input Args:
- * vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return: None
- *
- * Creates an interrupt controller chip for the VM specified by vm.
- */
+/* Create an interrupt controller chip for the specified VM. */
void vm_create_irqchip(struct kvm_vm *vm)
{
- int ret;
-
- ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
- TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
- "rc: %i errno: %i", ret, errno);
+ vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
vm->has_irqchip = true;
}
-/*
- * VM VCPU State
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return:
- * Pointer to structure that describes the state of the VCPU.
- *
- * Locates and returns a pointer to a structure that describes the
- * state of the VCPU with the given vcpuid.
- */
-struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- return vcpu->state;
-}
-
-/*
- * VM VCPU Run
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args: None
- *
- * Return: None
- *
- * Switch to executing the code for the VCPU given by vcpuid, within the VM
- * given by vm.
- */
-void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
-{
- int ret = _vcpu_run(vm, vcpuid);
- TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
- "rc: %i errno: %i", ret, errno);
-}
-
-int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
+int _vcpu_run(struct kvm_vcpu *vcpu)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int rc;
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
do {
- rc = ioctl(vcpu->fd, KVM_RUN, NULL);
+ rc = __vcpu_run(vcpu);
} while (rc == -1 && errno == EINTR);
- assert_on_unhandled_exception(vm, vcpuid);
+ assert_on_unhandled_exception(vcpu);
return rc;
}
-int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
+/*
+ * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
+ * Assert if KVM returns an error.
+ */
+void vcpu_run(struct kvm_vcpu *vcpu)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret = _vcpu_run(vcpu);
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- return vcpu->fd;
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
}
-void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- vcpu->state->immediate_exit = 1;
- ret = ioctl(vcpu->fd, KVM_RUN, NULL);
- vcpu->state->immediate_exit = 0;
+ vcpu->run->immediate_exit = 1;
+ ret = __vcpu_run(vcpu);
+ vcpu->run->immediate_exit = 0;
TEST_ASSERT(ret == -1 && errno == EINTR,
"KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
ret, errno);
}
-void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_guest_debug *debug)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);
-
- TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
-}
-
-/*
- * VM VCPU Set MP State
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * mp_state - mp_state to be set
- *
- * Output Args: None
- *
- * Return: None
- *
- * Sets the MP state of the VCPU given by vcpuid, to the state given
- * by mp_state.
- */
-void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_mp_state *mp_state)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
- TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
- "rc: %i errno: %i", ret, errno);
-}
-
/*
- * VM VCPU Get Reg List
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args:
- * None
- *
- * Return:
- * A pointer to an allocated struct kvm_reg_list
- *
* Get the list of guest registers which are supported for
- * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
+ * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer;
+ * the caller is responsible for freeing the list.
*/
-struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
+struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
{
struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
int ret;
- ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
+ ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
+
reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
reg_list->n = reg_list_n.n;
- vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
+ vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
return reg_list;
}
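
The two-step dance above is the standard KVM_GET_REG_LIST protocol: a first call with n = 0 must fail with E2BIG while reporting the required count, and only the second call fetches the registers. Callers own the returned allocation; an illustrative consumer:

struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
__u64 i;

for (i = 0; i < list->n; i++)
	printf("supported reg: 0x%llx\n", (unsigned long long)list->reg[i]);

free(list);	/* ownership transferred by vcpu_get_reg_list() */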
-/*
- * VM VCPU Regs Get
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args:
- * regs - current state of VCPU regs
- *
- * Return: None
- *
- * Obtains the current register state for the VCPU specified by vcpuid
- * and stores it at the location given by regs.
- */
-void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
- TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
- ret, errno);
-}
-
-/*
- * VM VCPU Regs Set
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * regs - Values to set VCPU regs to
- *
- * Output Args: None
- *
- * Return: None
- *
- * Sets the regs of the VCPU specified by vcpuid to the values
- * given by regs.
- */
-void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
+void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
- TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
- ret, errno);
-}
-
-#ifdef __KVM_HAVE_VCPU_EVENTS
-void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
- TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
- ret, errno);
-}
-
-void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
- TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
- ret, errno);
-}
-#endif
-
-#ifdef __x86_64__
-void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_nested_state *state)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
- TEST_ASSERT(ret == 0,
- "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
- ret, errno);
-}
-
-int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_nested_state *state, bool ignore_error)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
- if (!ignore_error) {
- TEST_ASSERT(ret == 0,
- "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
- ret, errno);
- }
-
- return ret;
-}
-#endif
-
-/*
- * VM VCPU System Regs Get
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- *
- * Output Args:
- * sregs - current state of VCPU system regs
- *
- * Return: None
- *
- * Obtains the current system register state for the VCPU specified by
- * vcpuid and stores it at the location given by sregs.
- */
-void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
- TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
- ret, errno);
-}
-
-/*
- * VM VCPU System Regs Set
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * sregs - Values to set VCPU system regs to
- *
- * Output Args: None
- *
- * Return: None
- *
- * Sets the system regs of the VCPU specified by vcpuid to the values
- * given by sregs.
- */
-void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
-{
- int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
- TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
- "rc: %i errno: %i", ret, errno);
-}
-
-int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
-}
-
-void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
-{
- int ret;
-
- ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
- TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
- ret, errno, strerror(errno));
-}
-
-void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
-{
- int ret;
-
- ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
- TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
- ret, errno, strerror(errno));
-}
-
-void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
-{
- int ret;
-
- ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
- TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
- ret, errno, strerror(errno));
-}
-
-void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
-{
- int ret;
-
- ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
- TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
- ret, errno, strerror(errno));
-}
-
-/*
- * VCPU Ioctl
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * cmd - Ioctl number
- * arg - Argument to pass to the ioctl
- *
- * Return: None
- *
- * Issues an arbitrary ioctl on a VCPU fd.
- */
-void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
- unsigned long cmd, void *arg)
-{
- int ret;
-
- ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
- TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
- cmd, ret, errno, strerror(errno));
-}
-
-int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
- unsigned long cmd, void *arg)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int ret;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- ret = ioctl(vcpu->fd, cmd, arg);
-
- return ret;
-}
-
-void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
-{
- struct vcpu *vcpu;
- uint32_t size = vm->dirty_ring_size;
+ uint32_t page_size = vcpu->vm->page_size;
+ uint32_t size = vcpu->vm->dirty_ring_size;
TEST_ASSERT(size > 0, "Should enable dirty ring first");
- vcpu = vcpu_find(vm, vcpuid);
-
- TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);
-
if (!vcpu->dirty_gfns) {
void *addr;
- addr = mmap(NULL, size, PROT_READ,
- MAP_PRIVATE, vcpu->fd,
- vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
+ page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
- addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
- MAP_PRIVATE, vcpu->fd,
- vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
+ page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
- addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED, vcpu->fd,
- vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
+ addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
+ page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
vcpu->dirty_gfns = addr;
@@ -2012,62 +1492,10 @@ void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
}
/*
- * VM Ioctl
- *
- * Input Args:
- * vm - Virtual Machine
- * cmd - Ioctl number
- * arg - Argument to pass to the ioctl
- *
- * Return: None
- *
- * Issues an arbitrary ioctl on a VM fd.
- */
-void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
-{
- int ret;
-
- ret = _vm_ioctl(vm, cmd, arg);
- TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
- cmd, ret, errno, strerror(errno));
-}
-
-int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
-{
- return ioctl(vm->fd, cmd, arg);
-}
-
-/*
- * KVM system ioctl
- *
- * Input Args:
- * vm - Virtual Machine
- * cmd - Ioctl number
- * arg - Argument to pass to the ioctl
- *
- * Return: None
- *
- * Issues an arbitrary ioctl on a KVM fd.
- */
-void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
-{
- int ret;
-
- ret = ioctl(vm->kvm_fd, cmd, arg);
- TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
- cmd, ret, errno, strerror(errno));
-}
-
-int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
-{
- return ioctl(vm->kvm_fd, cmd, arg);
-}
-
-/*
* Device Ioctl
*/
-int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
+int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
struct kvm_device_attr attribute = {
.group = group,
@@ -2078,43 +1506,31 @@ int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}
-int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
-{
- int ret = _kvm_device_check_attr(dev_fd, group, attr);
-
- TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
- return ret;
-}
-
-int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd)
+int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
{
- struct kvm_create_device create_dev;
- int ret;
+ struct kvm_create_device create_dev = {
+ .type = type,
+ .flags = KVM_CREATE_DEVICE_TEST,
+ };
- create_dev.type = type;
- create_dev.fd = -1;
- create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
- ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev);
- *fd = create_dev.fd;
- return ret;
+ return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
}
-int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test)
+int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
- int fd, ret;
-
- ret = _kvm_create_device(vm, type, test, &fd);
+ struct kvm_create_device create_dev = {
+ .type = type,
+ .fd = -1,
+ .flags = 0,
+ };
+ int err;
- if (!test) {
- TEST_ASSERT(!ret,
- "KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno);
- return fd;
- }
- return ret;
+ err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
+ TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
+ return err ? : create_dev.fd;
}
-int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
- void *val, bool write)
+int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
struct kvm_device_attr kvmattr = {
.group = group,
@@ -2122,58 +1538,20 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
.flags = 0,
.addr = (uintptr_t)val,
};
- int ret;
-
- ret = ioctl(dev_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
- &kvmattr);
- return ret;
-}
-
-int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
- void *val, bool write)
-{
- int ret = _kvm_device_access(dev_fd, group, attr, val, write);
-
- TEST_ASSERT(!ret, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
- return ret;
-}
-
-int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
-
- TEST_ASSERT(vcpu, "nonexistent vcpu id: %d", vcpuid);
-
- return _kvm_device_check_attr(vcpu->fd, group, attr);
-}
-
-int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr)
-{
- int ret = _vcpu_has_device_attr(vm, vcpuid, group, attr);
-
- TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
- return ret;
-}
-int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val, bool write)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
-
- TEST_ASSERT(vcpu, "nonexistent vcpu id: %d", vcpuid);
-
- return _kvm_device_access(vcpu->fd, group, attr, val, write);
+ return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
}
-int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
- uint64_t attr, void *val, bool write)
+int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
{
- int ret = _vcpu_access_device_attr(vm, vcpuid, group, attr, val, write);
+ struct kvm_device_attr kvmattr = {
+ .group = group,
+ .attr = attr,
+ .flags = 0,
+ .addr = (uintptr_t)val,
+ };
- TEST_ASSERT(!ret, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
- return ret;
+ return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
}
/*
@@ -2187,14 +1565,14 @@ int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
.level = level,
};
- return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
+ return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
int ret = _kvm_irq_line(vm, irq, level);
- TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno);
+ TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
}
struct kvm_irq_routing *kvm_gsi_routing_create(void)
@@ -2233,7 +1611,7 @@ int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
int ret;
assert(routing);
- ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing);
+ ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
free(routing);
return ret;
@@ -2244,8 +1622,7 @@ void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
int ret;
ret = _kvm_gsi_routing_write(vm, routing);
- TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i",
- ret, errno);
+ TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
}
/*
@@ -2267,7 +1644,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
int ctr;
struct userspace_mem_region *region;
- struct vcpu *vcpu;
+ struct kvm_vcpu *vcpu;
fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
@@ -2292,8 +1669,9 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
virt_dump(stream, vm, indent + 4);
}
fprintf(stream, "%*sVCPUs:\n", indent, "");
+
list_for_each_entry(vcpu, &vm->vcpus, list)
- vcpu_dump(stream, vm, vcpu->id, indent + 2);
+ vcpu_dump(stream, vcpu, indent + 2);
}
/* Known KVM exit reasons */
@@ -2447,64 +1825,11 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
-/*
- * Is Unrestricted Guest
- *
- * Input Args:
- * vm - Virtual Machine
- *
- * Output Args: None
- *
- * Return: True if the unrestricted guest is set to 'Y', otherwise return false.
- *
- * Check if the unrestricted guest flag is enabled.
- */
-bool vm_is_unrestricted_guest(struct kvm_vm *vm)
-{
- char val = 'N';
- size_t count;
- FILE *f;
-
- if (vm == NULL) {
- /* Ensure that the KVM vendor-specific module is loaded. */
- close(open_kvm_dev_path_or_exit());
- }
-
- f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
- if (f) {
- count = fread(&val, sizeof(char), 1, f);
- TEST_ASSERT(count == 1, "Unable to read from param file.");
- fclose(f);
- }
-
- return val == 'Y';
-}
-
-unsigned int vm_get_page_size(struct kvm_vm *vm)
-{
- return vm->page_size;
-}
-
-unsigned int vm_get_page_shift(struct kvm_vm *vm)
-{
- return vm->page_shift;
-}
-
-unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
+unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
{
return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
}
-uint64_t vm_get_max_gfn(struct kvm_vm *vm)
-{
- return vm->max_gfn;
-}
-
-int vm_get_fd(struct kvm_vm *vm)
-{
- return vm->fd;
-}
-
static unsigned int vm_calc_num_pages(unsigned int num_pages,
unsigned int page_shift,
unsigned int new_page_shift,
@@ -2545,14 +1870,112 @@ unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
return vm_adjust_num_guest_pages(mode, n);
}
-int vm_get_stats_fd(struct kvm_vm *vm)
+/*
+ * Read binary stats descriptors
+ *
+ * Input Args:
+ * stats_fd - the file descriptor for the binary stats file from which to read
+ * header - the binary stats metadata header corresponding to the given FD
+ *
+ * Output Args: None
+ *
+ * Return:
+ * A pointer to a newly allocated series of stat descriptors.
+ * Caller is responsible for freeing the returned kvm_stats_desc.
+ *
+ * Read the stats descriptors from the binary stats interface.
+ */
+struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
+ struct kvm_stats_header *header)
{
- return ioctl(vm->fd, KVM_GET_STATS_FD, NULL);
+ struct kvm_stats_desc *stats_desc;
+ ssize_t desc_size, total_size, ret;
+
+ desc_size = get_stats_descriptor_size(header);
+ total_size = header->num_desc * desc_size;
+
+ stats_desc = calloc(header->num_desc, desc_size);
+ TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");
+
+ ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
+ TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");
+
+ return stats_desc;
}
-int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid)
+/*
+ * Read stat data for a particular stat
+ *
+ * Input Args:
+ * stats_fd - the file descriptor for the binary stats file from which to read
+ * header - the binary stats metadata header corresponding to the given FD
+ * desc - the binary stat metadata for the particular stat to be read
+ * max_elements - the maximum number of 8-byte values to read into data
+ *
+ * Output Args:
+ * data - the buffer into which stat data should be read
+ *
+ * Read the data values of a specified stat from the binary stats interface.
+ */
+void read_stat_data(int stats_fd, struct kvm_stats_header *header,
+ struct kvm_stats_desc *desc, uint64_t *data,
+ size_t max_elements)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
+ size_t size = nr_elements * sizeof(*data);
+ ssize_t ret;
- return ioctl(vcpu->fd, KVM_GET_STATS_FD, NULL);
+ TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
+ TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);
+
+ ret = pread(stats_fd, data, size,
+ header->data_offset + desc->offset);
+
+ TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
+ desc->name, errno, strerror(errno));
+ TEST_ASSERT(ret == size,
+ "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
+ desc->name, ret, size);
+}
+
+/*
+ * Read the data of the named stat
+ *
+ * Input Args:
+ * vm - the VM for which the stat should be read
+ * stat_name - the name of the stat to read
+ * max_elements - the maximum number of 8-byte values to read into data
+ *
+ * Output Args:
+ * data - the buffer into which stat data should be read
+ *
+ * Read the data values of a specified stat from the binary stats interface.
+ */
+void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
+ size_t max_elements)
+{
+ struct kvm_stats_desc *desc;
+ size_t size_desc;
+ int i;
+
+ if (!vm->stats_fd) {
+ vm->stats_fd = vm_get_stats_fd(vm);
+ read_stats_header(vm->stats_fd, &vm->stats_header);
+ vm->stats_desc = read_stats_descriptors(vm->stats_fd,
+ &vm->stats_header);
+ }
+
+ size_desc = get_stats_descriptor_size(&vm->stats_header);
+
+ for (i = 0; i < vm->stats_header.num_desc; ++i) {
+ desc = (void *)vm->stats_desc + (i * size_desc);
+
+ if (strcmp(desc->name, stat_name))
+ continue;
+
+ read_stat_data(vm->stats_fd, &vm->stats_header, desc,
+ data, max_elements);
+
+ break;
+ }
}
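
__vm_get_stat() caches the stats fd, header and descriptor table on first use (and kvm_vm_free() above releases them), so repeated lookups cost one descriptor scan plus one pread(). A hedged usage sketch; the stat name is illustrative and depends on what the running kernel actually exports:

uint64_t flushes;

__vm_get_stat(vm, "remote_tlb_flush", &flushes, 1);
pr_info("remote_tlb_flush = %lu\n", (unsigned long)flushes);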
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
deleted file mode 100644
index a03febc24ba6..000000000000
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * tools/testing/selftests/kvm/lib/kvm_util_internal.h
- *
- * Copyright (C) 2018, Google LLC.
- */
-
-#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
-#define SELFTEST_KVM_UTIL_INTERNAL_H
-
-#include "linux/hashtable.h"
-#include "linux/rbtree.h"
-
-#include "sparsebit.h"
-
-struct userspace_mem_region {
- struct kvm_userspace_memory_region region;
- struct sparsebit *unused_phy_pages;
- int fd;
- off_t offset;
- void *host_mem;
- void *host_alias;
- void *mmap_start;
- void *mmap_alias;
- size_t mmap_size;
- struct rb_node gpa_node;
- struct rb_node hva_node;
- struct hlist_node slot_node;
-};
-
-struct vcpu {
- struct list_head list;
- uint32_t id;
- int fd;
- struct kvm_run *state;
- struct kvm_dirty_gfn *dirty_gfns;
- uint32_t fetch_index;
- uint32_t dirty_gfns_count;
-};
-
-struct userspace_mem_regions {
- struct rb_root gpa_tree;
- struct rb_root hva_tree;
- DECLARE_HASHTABLE(slot_hash, 9);
-};
-
-struct kvm_vm {
- int mode;
- unsigned long type;
- int kvm_fd;
- int fd;
- unsigned int pgtable_levels;
- unsigned int page_size;
- unsigned int page_shift;
- unsigned int pa_bits;
- unsigned int va_bits;
- uint64_t max_gfn;
- struct list_head vcpus;
- struct userspace_mem_regions regions;
- struct sparsebit *vpages_valid;
- struct sparsebit *vpages_mapped;
- bool has_irqchip;
- bool pgd_created;
- vm_paddr_t pgd;
- vm_vaddr_t gdt;
- vm_vaddr_t tss;
- vm_vaddr_t idt;
- vm_vaddr_t handlers;
- uint32_t dirty_ring_size;
-};
-
-struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
-
-/*
- * Virtual Translation Tables Dump
- *
- * Input Args:
- * stream - Output FILE stream
- * vm - Virtual Machine
- * indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps to the FILE stream given by @stream, the contents of all the
- * virtual translation tables for the VM given by @vm.
- */
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-
-/*
- * Register Dump
- *
- * Input Args:
- * stream - Output FILE stream
- * regs - Registers
- * indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps the state of the registers given by @regs, to the FILE stream
- * given by @stream.
- */
-void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
-
-/*
- * System Register Dump
- *
- * Input Args:
- * stream - Output FILE stream
- * sregs - System registers
- * indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps the state of the system registers given by @sregs, to the FILE stream
- * given by @stream.
- */
-void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
-
-struct userspace_mem_region *
-memslot2region(struct kvm_vm *vm, uint32_t memslot);
-
-#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
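
Deleting the internal header only works because its definitions moved somewhere public (presumably the shared kvm_util headers, given the "../kvm_util_internal.h" includes dropped earlier in this patch). Reconstructed from the fields this diff actually dereferences, the relocated vCPU struct plausibly looks like this; field order and any arch-specific members are assumptions:

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;		/* back-pointer, new in this series */
	struct kvm_run *run;		/* renamed from 'state' */
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};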
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 722df3a28791..9618b37c66f7 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -17,8 +17,8 @@ struct perf_test_args perf_test_args;
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
struct vcpu_thread {
- /* The id of the vCPU. */
- int vcpu_id;
+ /* The index of the vCPU. */
+ int vcpu_idx;
/* The pthread backing the vCPU. */
pthread_t thread;
@@ -36,24 +36,26 @@ static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
+static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+
/*
* Continuously write to the first 8 bytes of each page in the
* specified region.
*/
-static void guest_code(uint32_t vcpu_id)
+void perf_test_guest_code(uint32_t vcpu_idx)
{
struct perf_test_args *pta = &perf_test_args;
- struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
+ struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_idx];
uint64_t gva;
uint64_t pages;
int i;
- /* Make sure vCPU args data structure is not corrupt. */
- GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
gva = vcpu_args->gva;
pages = vcpu_args->pages;
+ /* Make sure vCPU args data structure is not corrupt. */
+ GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);
+
while (true) {
for (i = 0; i < pages; i++) {
uint64_t addr = gva + (i * pta->guest_page_size);
@@ -68,48 +70,52 @@ static void guest_code(uint32_t vcpu_id)
}
}
-void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
+void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
+ struct kvm_vcpu *vcpus[],
uint64_t vcpu_memory_bytes,
bool partition_vcpu_memory_access)
{
struct perf_test_args *pta = &perf_test_args;
struct perf_test_vcpu_args *vcpu_args;
- int vcpu_id;
+ int i;
- for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
- vcpu_args = &pta->vcpu_args[vcpu_id];
+ for (i = 0; i < nr_vcpus; i++) {
+ vcpu_args = &pta->vcpu_args[i];
+
+ vcpu_args->vcpu = vcpus[i];
+ vcpu_args->vcpu_idx = i;
- vcpu_args->vcpu_id = vcpu_id;
if (partition_vcpu_memory_access) {
vcpu_args->gva = guest_test_virt_mem +
- (vcpu_id * vcpu_memory_bytes);
+ (i * vcpu_memory_bytes);
vcpu_args->pages = vcpu_memory_bytes /
pta->guest_page_size;
- vcpu_args->gpa = pta->gpa + (vcpu_id * vcpu_memory_bytes);
+ vcpu_args->gpa = pta->gpa + (i * vcpu_memory_bytes);
} else {
vcpu_args->gva = guest_test_virt_mem;
- vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
+ vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /
pta->guest_page_size;
vcpu_args->gpa = pta->gpa;
}
- vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
+ vcpu_args_set(vcpus[i], 1, i);
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
- vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
+ i, vcpu_args->gpa, vcpu_args->gpa +
(vcpu_args->pages * pta->guest_page_size));
}
}
-struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
uint64_t vcpu_memory_bytes, int slots,
enum vm_mem_backing_src_type backing_src,
bool partition_vcpu_memory_access)
{
struct perf_test_args *pta = &perf_test_args;
struct kvm_vm *vm;
- uint64_t guest_num_pages;
+ uint64_t guest_num_pages, slot0_pages = 0;
uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
+ uint64_t region_end_gfn;
int i;
pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
@@ -124,7 +130,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
pta->guest_page_size = vm_guest_mode_params[mode].page_size;
guest_num_pages = vm_adjust_num_guest_pages(mode,
- (vcpus * vcpu_memory_bytes) / pta->guest_page_size);
+ (nr_vcpus * vcpu_memory_bytes) / pta->guest_page_size);
TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
"Guest memory size is not host page size aligned.");
@@ -135,33 +141,52 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
slots);
/*
+ * If using nested, allocate extra pages for the nested page tables and
+ * in-memory data structures.
+ */
+ if (pta->nested)
+ slot0_pages += perf_test_nested_pages(nr_vcpus);
+
+ /*
* Pass guest_num_pages to populate the page tables for test memory.
* The memory is also added to memslot 0, but that's a benign side
 * effect as KVM allows aliasing HVAs in memslots.
*/
- vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
- guest_num_pages, 0, guest_code, NULL);
+ vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
+ perf_test_guest_code, vcpus);
pta->vm = vm;
+ /* Put the test region at the top of guest physical memory. */
+ region_end_gfn = vm->max_gfn + 1;
+
+#ifdef __x86_64__
+ /*
+ * When running vCPUs in L2, restrict the test region to 48 bits to
+ * avoid needing 5-level page tables to identity map L2.
+ */
+ if (pta->nested)
+ region_end_gfn = min(region_end_gfn, (1UL << 48) / pta->guest_page_size);
+#endif
/*
 * If the test region requires more guest pages than the guest physical
 * address space provides, it will definitely cause problems.
*/
- TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+ TEST_ASSERT(guest_num_pages < region_end_gfn,
"Requested more guest memory than address space allows.\n"
" guest pages: %" PRIx64 " max gfn: %" PRIx64
- " vcpus: %d wss: %" PRIx64 "]\n",
- guest_num_pages, vm_get_max_gfn(vm), vcpus,
- vcpu_memory_bytes);
+ " nr_vcpus: %d wss: %" PRIx64 "]\n",
+ guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);
- pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
+ pta->gpa = (region_end_gfn - guest_num_pages - 1) * pta->guest_page_size;
pta->gpa = align_down(pta->gpa, backing_src_pagesz);
#ifdef __s390x__
/* Align to 1M (segment size) */
pta->gpa = align_down(pta->gpa, 1 << 20);
#endif
- pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa);
+ pta->size = guest_num_pages * pta->guest_page_size;
+ pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
+ pta->gpa, pta->gpa + pta->size);
/* Add extra memory slots for testing */
for (i = 0; i < slots; i++) {
@@ -176,7 +201,13 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
/* Do mapping for the demand paging memory slot */
virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);
- perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);
+ perf_test_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
+ partition_vcpu_memory_access);
+
+ if (pta->nested) {
+ pr_info("Configuring vCPUs to run in L2 (nested).\n");
+ perf_test_setup_nested(vm, nr_vcpus, vcpus);
+ }
ucall_init(vm, NULL);
@@ -198,6 +229,17 @@ void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
sync_global_to_guest(vm, perf_test_args);
}
+uint64_t __weak perf_test_nested_pages(int nr_vcpus)
+{
+ return 0;
+}
+
+void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
+{
+ pr_info("%s() not supported on this architecture, skipping.\n", __func__);
+ exit(KSFT_SKIP);
+}
+
static void *vcpu_thread_main(void *data)
{
struct vcpu_thread *vcpu = data;
@@ -213,39 +255,40 @@ static void *vcpu_thread_main(void *data)
while (!READ_ONCE(all_vcpu_threads_running))
;
- vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]);
+ vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_idx]);
return NULL;
}
-void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *))
+void perf_test_start_vcpu_threads(int nr_vcpus,
+ void (*vcpu_fn)(struct perf_test_vcpu_args *))
{
- int vcpu_id;
+ int i;
vcpu_thread_fn = vcpu_fn;
WRITE_ONCE(all_vcpu_threads_running, false);
- for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
- struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id];
+ for (i = 0; i < nr_vcpus; i++) {
+ struct vcpu_thread *vcpu = &vcpu_threads[i];
- vcpu->vcpu_id = vcpu_id;
+ vcpu->vcpu_idx = i;
WRITE_ONCE(vcpu->running, false);
pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
}
- for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
- while (!READ_ONCE(vcpu_threads[vcpu_id].running))
+ for (i = 0; i < nr_vcpus; i++) {
+ while (!READ_ONCE(vcpu_threads[i].running))
;
}
WRITE_ONCE(all_vcpu_threads_running, true);
}
-void perf_test_join_vcpu_threads(int vcpus)
+void perf_test_join_vcpu_threads(int nr_vcpus)
{
- int vcpu_id;
+ int i;
- for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
- pthread_join(vcpu_threads[vcpu_id].thread, NULL);
+ for (i = 0; i < nr_vcpus; i++)
+ pthread_join(vcpu_threads[i].thread, NULL);
}
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index 3961487a4870..604478151212 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -9,7 +9,6 @@
#include <assert.h>
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#include "processor.h"
#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000
@@ -54,7 +53,7 @@ static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
return (gva & pte_index_mask[level]) >> pte_index_shift[level];
}
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
if (!vm->pgd_created) {
vm_paddr_t paddr = vm_phy_pages_alloc(vm,
@@ -65,7 +64,7 @@ void virt_pgd_alloc(struct kvm_vm *vm)
}
}
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
uint64_t *ptep, next_ppn;
int level = vm->pgtable_levels - 1;
@@ -109,7 +108,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint64_t *ptep;
int level = vm->pgtable_levels - 1;
@@ -160,7 +159,7 @@ static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
#endif
}
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
int level = vm->pgtable_levels - 1;
uint64_t pgd, *ptep;
@@ -179,8 +178,9 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
}
-void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
+void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu)
{
+ struct kvm_vm *vm = vcpu->vm;
unsigned long satp;
/*
@@ -199,46 +199,46 @@ void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
satp |= SATP_MODE_48;
- set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
+ vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp);
}
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
struct kvm_riscv_core core;
- get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
- get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5);
+ vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6);
fprintf(stream,
" MODE: 0x%lx\n", core.mode);
@@ -268,13 +268,15 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
}
-static void __aligned(16) guest_hang(void)
+static void __aligned(16) guest_unexp_trap(void)
{
- while (1)
- ;
+ sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
+ KVM_RISCV_SELFTESTS_SBI_UNEXP,
+ 0, 0, 0, 0, 0, 0);
}
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ void *guest_code)
{
int r;
size_t stack_size = vm->page_size == 4096 ?
@@ -284,9 +286,10 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
DEFAULT_RISCV_GUEST_STACK_VADDR_MIN);
unsigned long current_gp = 0;
struct kvm_mp_state mps;
+ struct kvm_vcpu *vcpu;
- vm_vcpu_add(vm, vcpuid);
- riscv_vcpu_mmu_setup(vm, vcpuid);
+ vcpu = __vm_vcpu_add(vm, vcpu_id);
+ riscv_vcpu_mmu_setup(vcpu);
/*
* With SBI HSM support in KVM RISC-V, all secondary VCPUs are
@@ -294,26 +297,25 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
* are powered-on using KVM_SET_MP_STATE ioctl().
*/
mps.mp_state = KVM_MP_STATE_RUNNABLE;
- r = _vcpu_ioctl(vm, vcpuid, KVM_SET_MP_STATE, &mps);
+ r = __vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mps);
TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);
/* Setup global pointer of guest to be same as the host */
asm volatile (
"add %0, gp, zero" : "=r" (current_gp) : : "memory");
- set_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), current_gp);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp);
/* Setup stack pointer and program counter of guest */
- set_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp),
- stack_vaddr + stack_size);
- set_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc),
- (unsigned long)guest_code);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
+ vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
/* Setup default exception vector of guest */
- set_reg(vm, vcpuid, RISCV_CSR_REG(stvec),
- (unsigned long)guest_hang);
+ vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
+
+ return vcpu;
}
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
uint64_t id = RISCV_CORE_REG(regs.a0);
@@ -350,13 +352,13 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
case 7:
id = RISCV_CORE_REG(regs.a7);
break;
- };
- set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
+ }
+ vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t));
}
va_end(ap);
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
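
The net effect of the RISC-V conversion is that every accessor keys off the vCPU object instead of a (vm, vcpuid) pair. A short sketch, assuming vm_vcpu_add() is the public wrapper around vm_arch_vcpu_add() (the wrapper itself is not in this hunk):

struct kvm_vcpu *vcpu = vm_vcpu_add(vm, 0, guest_code);

/* Guest arguments are spread across a0-a7; this sets a0 and a1. */
vcpu_args_set(vcpu, 2, 0xcafe, 42);

/* Direct register access follows the same object-based pattern. */
vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.a2), 0);
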
diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c
index 9e42d8248fa6..087b9740bc8f 100644
--- a/tools/testing/selftests/kvm/lib/riscv/ucall.c
+++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c
@@ -8,7 +8,6 @@
#include <linux/kvm.h>
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#include "processor.h"
void ucall_init(struct kvm_vm *vm, void *arg)
@@ -53,34 +52,46 @@ void ucall(uint64_t cmd, int nargs, ...)
va_list va;
int i;
- nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+ nargs = min(nargs, UCALL_MAX_ARGS);
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
uc.args[i] = va_arg(va, uint64_t);
va_end(va);
- sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, 0, (vm_vaddr_t)&uc,
- 0, 0, 0, 0, 0);
+ sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
+ KVM_RISCV_SELFTESTS_SBI_UCALL,
+ (vm_vaddr_t)&uc, 0, 0, 0, 0, 0);
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
memset(uc, 0, sizeof(*uc));
if (run->exit_reason == KVM_EXIT_RISCV_SBI &&
- run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT &&
- run->riscv_sbi.function_id == 0) {
- memcpy(&ucall, addr_gva2hva(vm, run->riscv_sbi.args[0]),
- sizeof(ucall));
-
- vcpu_run_complete_io(vm, vcpu_id);
- if (uc)
- memcpy(uc, &ucall, sizeof(ucall));
+ run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) {
+ switch (run->riscv_sbi.function_id) {
+ case KVM_RISCV_SELFTESTS_SBI_UCALL:
+ memcpy(&ucall,
+ addr_gva2hva(vcpu->vm, run->riscv_sbi.args[0]),
+ sizeof(ucall));
+
+ vcpu_run_complete_io(vcpu);
+ if (uc)
+ memcpy(uc, &ucall, sizeof(ucall));
+
+ break;
+ case KVM_RISCV_SELFTESTS_SBI_UNEXP:
+ vcpu_dump(stderr, vcpu, 2);
+ TEST_ASSERT(0, "Unexpected trap taken by guest");
+ break;
+ default:
+ break;
+ }
}
return ucall.cmd;
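
With get_ucall() taking the vCPU object, a typical host-side run loop reduces to the sketch below (UCALL_SYNC/UCALL_DONE are the standard selftest commands; treat the exact set handled as test-specific):

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			continue;	/* guest checkpoint, keep running */
		case UCALL_DONE:
			return;
		default:
			TEST_FAIL("Unexpected ucall from guest");
		}
	}
}
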
diff --git a/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
index 86b9e611ad87..cdb7daeed5fd 100644
--- a/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
+++ b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c
@@ -8,8 +8,6 @@
#include "test_util.h"
#include "kvm_util.h"
-#define VCPU_ID 6
-
#define ICPT_INSTRUCTION 0x04
#define IPA0_DIAG 0x8300
@@ -27,14 +25,15 @@ static void guest_code(void)
*/
static uint64_t diag318_handler(void)
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
uint64_t reg;
uint64_t diag318_info;
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_run(vm, VCPU_ID);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vcpu_run(vcpu);
+ run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"DIAGNOSE 0x0318 instruction was not intercepted");
@@ -62,7 +61,7 @@ uint64_t get_diag318_info(void)
* If KVM does not support diag318, then return 0 to
* ensure tests do not break.
*/
- if (!kvm_check_cap(KVM_CAP_S390_DIAG318)) {
+ if (!kvm_has_cap(KVM_CAP_S390_DIAG318)) {
if (!printed_skip) {
fprintf(stdout, "KVM_CAP_S390_DIAG318 not supported. "
"Skipping diag318 test.\n");
diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c
index f87c7137598e..89d7340d9cbd 100644
--- a/tools/testing/selftests/kvm/lib/s390x/processor.c
+++ b/tools/testing/selftests/kvm/lib/s390x/processor.c
@@ -7,11 +7,10 @@
#include "processor.h"
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#define PAGES_PER_REGION 4
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
vm_paddr_t paddr;
@@ -47,7 +46,7 @@ static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
-void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
int ri, idx;
uint64_t *entry;
@@ -86,7 +85,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
entry[idx] = gpa;
}
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
int ri, idx;
uint64_t *entry;
@@ -147,7 +146,7 @@ static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
}
}
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
if (!vm->pgd_created)
return;
@@ -155,12 +154,14 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
virt_dump_region(stream, vm, indent, vm->pgd);
}
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ void *guest_code)
{
size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
uint64_t stack_vaddr;
struct kvm_regs regs;
struct kvm_sregs sregs;
+ struct kvm_vcpu *vcpu;
struct kvm_run *run;
TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
@@ -169,24 +170,26 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
stack_vaddr = vm_vaddr_alloc(vm, stack_size,
DEFAULT_GUEST_STACK_VADDR_MIN);
- vm_vcpu_add(vm, vcpuid);
+ vcpu = __vm_vcpu_add(vm, vcpu_id);
/* Setup guest registers */
- vcpu_regs_get(vm, vcpuid, &regs);
+ vcpu_regs_get(vcpu, &regs);
regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
- vcpu_regs_set(vm, vcpuid, &regs);
+ vcpu_regs_set(vcpu, &regs);
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.crs[0] |= 0x00040000; /* Enable floating point regs */
sregs.crs[1] = vm->pgd | 0xf; /* Primary region table */
- vcpu_sregs_set(vm, vcpuid, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
- run = vcpu_state(vm, vcpuid);
+ run = vcpu->run;
run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */
run->psw_addr = (uintptr_t)guest_code;
+
+ return vcpu;
}
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
struct kvm_regs regs;
@@ -197,26 +200,21 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
num);
va_start(ap, num);
- vcpu_regs_get(vm, vcpuid, &regs);
+ vcpu_regs_get(vcpu, &regs);
for (i = 0; i < num; i++)
regs.gprs[i + 2] = va_arg(ap, uint64_t);
- vcpu_regs_set(vm, vcpuid, &regs);
+ vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
-
- if (!vcpu)
- return;
-
fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
- indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
+ indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
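
For reference, the s390x vcpu_args_set() above loads guest arguments into general purpose registers starting at r2, so a two-argument guest entry point is wired up as:

/* guest_code(uint64_t gva, uint64_t nr_pages): r2 = gva, r3 = nr_pages. */
vcpu_args_set(vcpu, 2, gva, nr_pages);
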
diff --git a/tools/testing/selftests/kvm/lib/s390x/ucall.c b/tools/testing/selftests/kvm/lib/s390x/ucall.c
index 9d3b0f15249a..73dc4e21190f 100644
--- a/tools/testing/selftests/kvm/lib/s390x/ucall.c
+++ b/tools/testing/selftests/kvm/lib/s390x/ucall.c
@@ -22,7 +22,7 @@ void ucall(uint64_t cmd, int nargs, ...)
va_list va;
int i;
- nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+ nargs = min(nargs, UCALL_MAX_ARGS);
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
@@ -33,9 +33,9 @@ void ucall(uint64_t cmd, int nargs, ...)
asm volatile ("diag 0,%0,0x501" : : "a"(&uc) : "memory");
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
@@ -47,10 +47,10 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
(run->s390_sieic.ipb >> 16) == 0x501) {
int reg = run->s390_sieic.ipa & 0xf;
- memcpy(&ucall, addr_gva2hva(vm, run->s.regs.gprs[reg]),
+ memcpy(&ucall, addr_gva2hva(vcpu->vm, run->s.regs.gprs[reg]),
sizeof(ucall));
- vcpu_run_complete_io(vm, vcpu_id);
+ vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
diff --git a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
new file mode 100644
index 000000000000..0f344a7c89c4
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * x86_64-specific extensions to perf_test_util.c.
+ *
+ * Copyright (C) 2022, Google, Inc.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+void perf_test_l2_guest_code(uint64_t vcpu_id)
+{
+ perf_test_guest_code(vcpu_id);
+ vmcall();
+}
+
+extern char perf_test_l2_guest_entry[];
+__asm__(
+"perf_test_l2_guest_entry:"
+" mov (%rsp), %rdi;"
+" call perf_test_l2_guest_code;"
+" ud2;"
+);
+
+static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ unsigned long *rsp;
+
+ GUEST_ASSERT(vmx->vmcs_gpa);
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx));
+ GUEST_ASSERT(load_vmcs(vmx));
+ GUEST_ASSERT(ept_1g_pages_supported());
+
+ rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
+ *rsp = vcpu_id;
+ prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);
+
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ GUEST_DONE();
+}
+
+uint64_t perf_test_nested_pages(int nr_vcpus)
+{
+ /*
+ * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
+ * pages and 4-level paging, plus a few pages per-vCPU for data
+ * structures such as the VMCS.
+ */
+ return 513 + 10 * nr_vcpus;
+}
+
+void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
+{
+ uint64_t start, end;
+
+ prepare_eptp(vmx, vm, 0);
+
+ /*
+ * Identity map the first 4G and the test region with 1G pages so that
+ * KVM can shadow the EPT12 with the maximum huge page size supported
+ * by the backing source.
+ */
+ nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
+
+ start = align_down(perf_test_args.gpa, PG_SIZE_1G);
+ end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G);
+ nested_identity_map_1g(vmx, vm, start, end - start);
+}
+
+void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
+{
+ struct vmx_pages *vmx, *vmx0 = NULL;
+ struct kvm_regs regs;
+ vm_vaddr_t vmx_gva;
+ int vcpu_id;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+
+ for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
+ vmx = vcpu_alloc_vmx(vm, &vmx_gva);
+
+ if (vcpu_id == 0) {
+ perf_test_setup_ept(vmx, vm);
+ vmx0 = vmx;
+ } else {
+ /* Share the same EPT table across all vCPUs. */
+ vmx->eptp = vmx0->eptp;
+ vmx->eptp_hva = vmx0->eptp_hva;
+ vmx->eptp_gpa = vmx0->eptp_gpa;
+ }
+
+ /*
+ * Override the vCPU to run perf_test_l1_guest_code() which will
+ * bounce it into L2 before calling perf_test_guest_code().
+ */
+ vcpu_regs_get(vcpus[vcpu_id], &regs);
+ regs.rip = (unsigned long) perf_test_l1_guest_code;
+ vcpu_regs_set(vcpus[vcpu_id], &regs);
+ vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
+ }
+}
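
These are the strong definitions overriding the __weak perf_test_nested_pages()/perf_test_setup_nested() fallbacks added to the common code above. How a test opts in is not shown in these hunks; a plausible sketch, assuming perf_test_args.nested is flipped before VM creation:

/* Hypothetical opt-in: request L2 vCPUs before creating the VM. */
perf_test_args.nested = true;

vm = perf_test_create_vm(VM_MODE_DEFAULT, nr_vcpus, vcpu_memory_bytes,
			 1, VM_MEM_SRC_ANONYMOUS, true);
/*
 * perf_test_create_vm() then sizes slot0 via perf_test_nested_pages() and
 * calls perf_test_setup_nested(); on non-x86 the __weak fallback skips
 * the test with KSFT_SKIP instead.
 */
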
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 33ea5e9955d9..2e6e61bbe81b 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -7,7 +7,6 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#include "processor.h"
#ifndef NUM_INTERRUPTS
@@ -17,10 +16,11 @@
#define DEFAULT_CODE_SELECTOR 0x8
#define DEFAULT_DATA_SELECTOR 0x10
+#define MAX_NR_CPUID_ENTRIES 100
+
vm_vaddr_t exception_handlers;
-void regs_dump(FILE *stream, struct kvm_regs *regs,
- uint8_t indent)
+static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
"rcx: 0x%.16llx rdx: 0x%.16llx\n",
@@ -43,21 +43,6 @@ void regs_dump(FILE *stream, struct kvm_regs *regs,
regs->rip, regs->rflags);
}
-/*
- * Segment Dump
- *
- * Input Args:
- * stream - Output FILE stream
- * segment - KVM segment
- * indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps the state of the KVM segment given by @segment, to the FILE stream
- * given by @stream.
- */
static void segment_dump(FILE *stream, struct kvm_segment *segment,
uint8_t indent)
{
@@ -75,21 +60,6 @@ static void segment_dump(FILE *stream, struct kvm_segment *segment,
segment->unusable, segment->padding);
}
-/*
- * dtable Dump
- *
- * Input Args:
- * stream - Output FILE stream
- * dtable - KVM dtable
- * indent - Left margin indent amount
- *
- * Output Args: None
- *
- * Return: None
- *
- * Dumps the state of the KVM dtable given by @dtable, to the FILE stream
- * given by @stream.
- */
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
uint8_t indent)
{
@@ -99,8 +69,7 @@ static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}
-void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
- uint8_t indent)
+static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
{
unsigned int i;
@@ -142,7 +111,7 @@ void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
}
}
-void virt_pgd_alloc(struct kvm_vm *vm)
+void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -158,7 +127,7 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
int level)
{
uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
- int index = vaddr >> (vm->page_shift + level * 9) & 0x1ffu;
+ int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
return &page_table[index];
}
@@ -167,14 +136,14 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
uint64_t pt_pfn,
uint64_t vaddr,
uint64_t paddr,
- int level,
- enum x86_page_size page_size)
+ int current_level,
+ int target_level)
{
- uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
+ uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);
if (!(*pte & PTE_PRESENT_MASK)) {
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
- if (level == page_size)
+ if (current_level == target_level)
*pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
else
*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
@@ -184,20 +153,19 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
* a hugepage at this level, and that there isn't a hugepage at
* this level.
*/
- TEST_ASSERT(level != page_size,
+ TEST_ASSERT(current_level != target_level,
"Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
- page_size, vaddr);
+ current_level, vaddr);
TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
"Cannot create page table at level: %u, vaddr: 0x%lx\n",
- level, vaddr);
+ current_level, vaddr);
}
return pte;
}
-void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- enum x86_page_size page_size)
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
{
- const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
+ const uint64_t pg_size = PG_LEVEL_SIZE(level);
uint64_t *pml4e, *pdpe, *pde;
uint64_t *pte;
@@ -222,46 +190,43 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
* early if a hugepage was created.
*/
pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
- vaddr, paddr, 3, page_size);
+ vaddr, paddr, PG_LEVEL_512G, level);
if (*pml4e & PTE_LARGE_MASK)
return;
- pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
+ pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
if (*pdpe & PTE_LARGE_MASK)
return;
- pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
+ pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
if (*pde & PTE_LARGE_MASK)
return;
/* Fill in page table entry. */
- pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
+ pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K);
TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
"PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
}
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
- __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
+ __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
}
-static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
- uint64_t vaddr)
+static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
+ struct kvm_vcpu *vcpu,
+ uint64_t vaddr)
{
uint16_t index[4];
uint64_t *pml4e, *pdpe, *pde;
uint64_t *pte;
- struct kvm_cpuid_entry2 *entry;
struct kvm_sregs sregs;
- int max_phy_addr;
uint64_t rsvd_mask = 0;
- entry = kvm_get_supported_cpuid_index(0x80000008, 0);
- max_phy_addr = entry->eax & 0x000000ff;
/* Set the high bits in the reserved mask. */
- if (max_phy_addr < 52)
- rsvd_mask = GENMASK_ULL(51, max_phy_addr);
+ if (vm->pa_bits < 52)
+ rsvd_mask = GENMASK_ULL(51, vm->pa_bits);
/*
* SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
@@ -269,7 +234,7 @@ static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
* If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1,
* the XD flag (bit 63) is reserved.
*/
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
if ((sregs.efer & EFER_NX) == 0) {
rsvd_mask |= PTE_NX_MASK;
}
@@ -321,22 +286,23 @@ static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
return &pte[index[0]];
}
-uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
+uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr)
{
- uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+ uint64_t *pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
return *(uint64_t *)pte;
}
-void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
- uint64_t pte)
+void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t vaddr, uint64_t pte)
{
- uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+ uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpu, vaddr);
*(uint64_t *)new_pte = pte;
}
-void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
uint64_t *pml4e, *pml4e_start;
uint64_t *pdpe, *pdpe_start;
@@ -517,7 +483,7 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
kvm_seg_fill_gdt_64bit(vm, segp);
}
-vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint16_t index[4];
uint64_t *pml4e, *pdpe, *pde;
@@ -580,12 +546,12 @@ static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
kvm_seg_fill_gdt_64bit(vm, segp);
}
-static void vcpu_setup(struct kvm_vm *vm, int vcpuid)
+static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
struct kvm_sregs sregs;
/* Set mode specific system register values. */
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.idt.limit = 0;
@@ -609,25 +575,10 @@ static void vcpu_setup(struct kvm_vm *vm, int vcpuid)
}
sregs.cr3 = vm->pgd;
- vcpu_sregs_set(vm, vcpuid, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
}
-#define CPUID_XFD_BIT (1 << 4)
-static bool is_xfd_supported(void)
-{
- int eax, ebx, ecx, edx;
- const int leaf = 0xd, subleaf = 0x1;
-
- __asm__ __volatile__(
- "cpuid"
- : /* output */ "=a"(eax), "=b"(ebx),
- "=c"(ecx), "=d"(edx)
- : /* input */ "0"(leaf), "2"(subleaf));
-
- return !!(eax & CPUID_XFD_BIT);
-}
-
-void vm_xsave_req_perm(int bit)
+void __vm_xsave_require_permission(int bit, const char *name)
{
int kvm_fd;
u64 bitmask;
@@ -638,26 +589,21 @@ void vm_xsave_req_perm(int bit)
.addr = (unsigned long) &bitmask
};
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));
+
kvm_fd = open_kvm_dev_path_or_exit();
- rc = ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
+ rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
close(kvm_fd);
+
if (rc == -1 && (errno == ENXIO || errno == EINVAL))
- exit(KSFT_SKIP);
- TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
- if (!(bitmask & (1ULL << bit)))
- exit(KSFT_SKIP);
+ __TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");
- if (!is_xfd_supported())
- exit(KSFT_SKIP);
+ TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);
- rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit);
+ __TEST_REQUIRE(bitmask & (1ULL << bit),
+ "Required XSAVE feature '%s' not supported", name);
- /*
- * Older kernels (<5.15) don't support
- * ARCH_REQ_XCOMP_GUEST_PERM and return directly.
- */
- if (rc)
- return;
+ TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit));
rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
@@ -666,108 +612,89 @@ void vm_xsave_req_perm(int bit)
bitmask);
}
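
The rework above folds the manual skip logic into TEST_REQUIRE()/__TEST_REQUIRE(). Their definitions live elsewhere in the series; a minimal sketch of the assumed semantics (skip with KSFT_SKIP when the requirement fails):

#define __TEST_REQUIRE(f, fmt, ...)				\
do {								\
	if (!(f)) {						\
		printf(fmt "\n", ##__VA_ARGS__);		\
		exit(KSFT_SKIP);				\
	}							\
} while (0)

#define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f)
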
-void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+ void *guest_code)
{
struct kvm_mp_state mp_state;
struct kvm_regs regs;
vm_vaddr_t stack_vaddr;
+ struct kvm_vcpu *vcpu;
+
stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
DEFAULT_GUEST_STACK_VADDR_MIN);
- /* Create VCPU */
- vm_vcpu_add(vm, vcpuid);
- vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
- vcpu_setup(vm, vcpuid);
+ vcpu = __vm_vcpu_add(vm, vcpu_id);
+ vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
+ vcpu_setup(vm, vcpu);
/* Setup guest general purpose registers */
- vcpu_regs_get(vm, vcpuid, &regs);
+ vcpu_regs_get(vcpu, &regs);
regs.rflags = regs.rflags | 0x2;
regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
regs.rip = (unsigned long) guest_code;
- vcpu_regs_set(vm, vcpuid, &regs);
+ vcpu_regs_set(vcpu, &regs);
/* Setup the MP state */
mp_state.mp_state = 0;
- vcpu_set_mp_state(vm, vcpuid, &mp_state);
+ vcpu_mp_state_set(vcpu, &mp_state);
+
+ return vcpu;
}
-/*
- * Allocate an instance of struct kvm_cpuid2
- *
- * Input Args: None
- *
- * Output Args: None
- *
- * Return: A pointer to the allocated struct. The caller is responsible
- * for freeing this struct.
- *
- * Since kvm_cpuid2 uses a 0-length array to allow the size of the
- * array to be decided at allocation time, allocation is slightly
- * complicated. This function uses a reasonable default length for
- * the array and performs the appropriate allocation.
- */
-static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
+struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
{
- struct kvm_cpuid2 *cpuid;
- int nent = 100;
- size_t size;
-
- size = sizeof(*cpuid);
- size += nent * sizeof(struct kvm_cpuid_entry2);
- cpuid = malloc(size);
- if (!cpuid) {
- perror("malloc");
- abort();
- }
+ struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);
- cpuid->nent = nent;
+ vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
- return cpuid;
+ return vcpu;
}
-/*
- * KVM Supported CPUID Get
- *
- * Input Args: None
- *
- * Output Args:
- *
- * Return: The supported KVM CPUID
- *
- * Get the guest CPUID supported by KVM.
- */
-struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
+void vcpu_arch_free(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->cpuid)
+ free(vcpu->cpuid);
+}
+
+const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
static struct kvm_cpuid2 *cpuid;
- int ret;
int kvm_fd;
if (cpuid)
return cpuid;
- cpuid = allocate_kvm_cpuid2();
+ cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
kvm_fd = open_kvm_dev_path_or_exit();
- ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
- TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
- ret, errno);
+ kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
close(kvm_fd);
return cpuid;
}
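
allocate_kvm_cpuid2() now takes an entry count and, together with kvm_cpuid2_size(), replaces the open-coded sizing removed below. Their presumed shape, given the flexible array member in struct kvm_cpuid2:

static inline size_t kvm_cpuid2_size(int nr_entries)
{
	return sizeof(struct kvm_cpuid2) +
	       sizeof(struct kvm_cpuid_entry2) * nr_entries;
}

static inline struct kvm_cpuid2 *allocate_kvm_cpuid2(int nr_entries)
{
	struct kvm_cpuid2 *cpuid = calloc(1, kvm_cpuid2_size(nr_entries));

	TEST_ASSERT(cpuid, "-ENOMEM when allocating kvm_cpuid2");
	cpuid->nent = nr_entries;
	return cpuid;
}
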
-/*
- * KVM Get MSR
- *
- * Input Args:
- * msr_index - Index of MSR
- *
- * Output Args: None
- *
- * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
- *
- * Get value of MSR for VCPU.
- */
+bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
+ struct kvm_x86_cpu_feature feature)
+{
+ const struct kvm_cpuid_entry2 *entry;
+ int i;
+
+ for (i = 0; i < cpuid->nent; i++) {
+ entry = &cpuid->entries[i];
+
+ /*
+ * The output registers in kvm_cpuid_entry2 are in alphabetical
+ * order, but kvm_x86_cpu_feature matches that mess, so yay
+ * pointer shenanigans!
+ */
+ if (entry->function == feature.function &&
+ entry->index == feature.index)
+ return (&entry->eax)[feature.reg] & BIT(feature.bit);
+ }
+
+ return false;
+}
+
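
A hedged usage example: kvm_cpu_has(), used elsewhere in this patch, presumably wraps kvm_cpuid_has() over the cached supported-CPUID, e.g.:

bool kvm_cpu_has(struct kvm_x86_cpu_feature feature)
{
	return kvm_cpuid_has(kvm_get_supported_cpuid(), feature);
}

/* ...so a test can simply do: */
if (kvm_cpu_has(X86_FEATURE_XSAVE))	/* X86_FEATURE_XSAVE assumed defined */
	pr_info("KVM exposes XSAVE to guests\n");
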
uint64_t kvm_get_feature_msr(uint64_t msr_index)
{
struct {
@@ -780,218 +707,98 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index)
buffer.entry.index = msr_index;
kvm_fd = open_kvm_dev_path_or_exit();
- r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
- TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
- " rc: %i errno: %i", r, errno);
+ r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));
close(kvm_fd);
return buffer.entry.data;
}
-/*
- * VM VCPU CPUID Get
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU id
- *
- * Output Args: None
- *
- * Return: KVM CPUID (KVM_GET_CPUID2)
- *
- * Get the VCPU's CPUID.
- */
-struct kvm_cpuid2 *vcpu_get_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- struct kvm_cpuid2 *cpuid;
- int max_ent;
- int rc = -1;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
-
- cpuid = allocate_kvm_cpuid2();
- max_ent = cpuid->nent;
-
- for (cpuid->nent = 1; cpuid->nent <= max_ent; cpuid->nent++) {
- rc = ioctl(vcpu->fd, KVM_GET_CPUID2, cpuid);
- if (!rc)
- break;
+ TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");
- TEST_ASSERT(rc == -1 && errno == E2BIG,
- "KVM_GET_CPUID2 should either succeed or give E2BIG: %d %d",
- rc, errno);
+ /* Allow overriding the default CPUID. */
+ if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
+ free(vcpu->cpuid);
+ vcpu->cpuid = NULL;
}
- TEST_ASSERT(rc == 0, "KVM_GET_CPUID2 failed, rc: %i errno: %i",
- rc, errno);
+ if (!vcpu->cpuid)
+ vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);
- return cpuid;
+ memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
+ vcpu_set_cpuid(vcpu);
}
-
-
-/*
- * Locate a cpuid entry.
- *
- * Input Args:
- * function: The function of the cpuid entry to find.
- * index: The index of the cpuid entry.
- *
- * Output Args: None
- *
- * Return: A pointer to the cpuid entry. Never returns NULL.
- */
-struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr)
{
- struct kvm_cpuid2 *cpuid;
- struct kvm_cpuid_entry2 *entry = NULL;
- int i;
-
- cpuid = kvm_get_supported_cpuid();
- for (i = 0; i < cpuid->nent; i++) {
- if (cpuid->entries[i].function == function &&
- cpuid->entries[i].index == index) {
- entry = &cpuid->entries[i];
- break;
- }
- }
+ struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, 0x80000008);
- TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
- function, index);
- return entry;
+ entry->eax = (entry->eax & ~0xff) | maxphyaddr;
+ vcpu_set_cpuid(vcpu);
}
-
-int __vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_cpuid2 *cpuid)
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);
- return ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
+ entry->eax = 0;
+ entry->ebx = 0;
+ entry->ecx = 0;
+ entry->edx = 0;
+ vcpu_set_cpuid(vcpu);
}
-/*
- * VM VCPU CPUID Set
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU id
- * cpuid - The CPUID values to set.
- *
- * Output Args: None
- *
- * Return: void
- *
- * Set the VCPU's CPUID.
- */
-void vcpu_set_cpuid(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
+void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
+ struct kvm_x86_cpu_feature feature,
+ bool set)
{
- int rc;
+ struct kvm_cpuid_entry2 *entry;
+ u32 *reg;
- rc = __vcpu_set_cpuid(vm, vcpuid, cpuid);
- TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
- rc, errno);
+ entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
+ reg = (&entry->eax) + feature.reg;
+ if (set)
+ *reg |= BIT(feature.bit);
+ else
+ *reg &= ~BIT(feature.bit);
+
+ vcpu_set_cpuid(vcpu);
}
-/*
- * VCPU Get MSR
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * msr_index - Index of MSR
- *
- * Output Args: None
- *
- * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
- *
- * Get value of MSR for VCPU.
- */
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
+uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
struct {
struct kvm_msrs header;
struct kvm_msr_entry entry;
} buffer = {};
- int r;
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
- r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
- TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
- " rc: %i errno: %i", r, errno);
+
+ vcpu_msrs_get(vcpu, &buffer.header);
return buffer.entry.data;
}
-/*
- * _VCPU Set MSR
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * msr_index - Index of MSR
- * msr_value - New value of MSR
- *
- * Output Args: None
- *
- * Return: The result of KVM_SET_MSRS.
- *
- * Sets the value of an MSR for the given VCPU.
- */
-int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value)
+int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
struct {
struct kvm_msrs header;
struct kvm_msr_entry entry;
} buffer = {};
- int r;
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
memset(&buffer, 0, sizeof(buffer));
buffer.header.nmsrs = 1;
buffer.entry.index = msr_index;
buffer.entry.data = msr_value;
- r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
- return r;
-}
-
-/*
- * VCPU Set MSR
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * msr_index - Index of MSR
- * msr_value - New value of MSR
- *
- * Output Args: None
- *
- * Return: On success, nothing. On failure a TEST_ASSERT is produced.
- *
- * Set value of MSR for VCPU.
- */
-void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value)
-{
- int r;
- r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);
- TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
- " rc: %i errno: %i", r, errno);
+ return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
}
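
Note the deliberate asymmetry: vcpu_get_msr() asserts internally (via vcpu_msrs_get(), which asserts on failure in this series' wrappers), while _vcpu_set_msr() returns the raw KVM_SET_MSRS result so negative tests can expect failure. A sketch using the usual msr-index names:

uint64_t efer = vcpu_get_msr(vcpu, MSR_EFER);

/* KVM_SET_MSRS returns the number of MSRs written, i.e. 1 on success. */
if (_vcpu_set_msr(vcpu, MSR_EFER, efer | EFER_NX) != 1)
	pr_info("KVM rejected the EFER write\n");
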
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
va_list ap;
struct kvm_regs regs;
@@ -1001,7 +808,7 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
num);
va_start(ap, num);
- vcpu_regs_get(vm, vcpuid, &regs);
+ vcpu_regs_get(vcpu, &regs);
if (num >= 1)
regs.rdi = va_arg(ap, uint64_t);
@@ -1021,85 +828,112 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
if (num >= 6)
regs.r9 = va_arg(ap, uint64_t);
- vcpu_regs_set(vm, vcpuid, &regs);
+ vcpu_regs_set(vcpu, &regs);
va_end(ap);
}
-void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
struct kvm_regs regs;
struct kvm_sregs sregs;
- fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
+ fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);
fprintf(stream, "%*sregs:\n", indent + 2, "");
- vcpu_regs_get(vm, vcpuid, &regs);
+ vcpu_regs_get(vcpu, &regs);
regs_dump(stream, &regs, indent + 4);
fprintf(stream, "%*ssregs:\n", indent + 2, "");
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs_dump(stream, &sregs, indent + 4);
}
-static int kvm_get_num_msrs_fd(int kvm_fd)
+static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
{
+ struct kvm_msr_list *list;
struct kvm_msr_list nmsrs;
- int r;
+ int kvm_fd, r;
+
+ kvm_fd = open_kvm_dev_path_or_exit();
nmsrs.nmsrs = 0;
- r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
- TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
- r);
+ if (!feature_msrs)
+ r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
+ else
+ r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);
- return nmsrs.nmsrs;
-}
+ TEST_ASSERT(r == -1 && errno == E2BIG,
+ "Expected -E2BIG, got rc: %i errno: %i (%s)",
+ r, errno, strerror(errno));
-static int kvm_get_num_msrs(struct kvm_vm *vm)
-{
- return kvm_get_num_msrs_fd(vm->kvm_fd);
+ list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
+ TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
+ list->nmsrs = nmsrs.nmsrs;
+
+ if (!feature_msrs)
+ kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
+ else
+ kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
+ close(kvm_fd);
+
+ TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
+ "Number of MSRs in list changed, was %d, now %d",
+ nmsrs.nmsrs, list->nmsrs);
+ return list;
}
-struct kvm_msr_list *kvm_get_msr_index_list(void)
+const struct kvm_msr_list *kvm_get_msr_index_list(void)
{
- struct kvm_msr_list *list;
- int nmsrs, r, kvm_fd;
+ static const struct kvm_msr_list *list;
- kvm_fd = open_kvm_dev_path_or_exit();
+ if (!list)
+ list = __kvm_get_msr_index_list(false);
+ return list;
+}
- nmsrs = kvm_get_num_msrs_fd(kvm_fd);
- list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
- list->nmsrs = nmsrs;
- r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
- close(kvm_fd);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
- r);
+const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
+{
+ static const struct kvm_msr_list *list;
+ if (!list)
+ list = __kvm_get_msr_index_list(true);
return list;
}
-static int vcpu_save_xsave_state(struct kvm_vm *vm, struct vcpu *vcpu,
- struct kvm_x86_state *state)
+bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
{
- int size;
+ const struct kvm_msr_list *list = kvm_get_msr_index_list();
+ int i;
- size = vm_check_cap(vm, KVM_CAP_XSAVE2);
- if (!size)
- size = sizeof(struct kvm_xsave);
+ for (i = 0; i < list->nmsrs; ++i) {
+ if (list->indices[i] == msr_index)
+ return true;
+ }
- state->xsave = malloc(size);
- if (size == sizeof(struct kvm_xsave))
- return ioctl(vcpu->fd, KVM_GET_XSAVE, state->xsave);
- else
- return ioctl(vcpu->fd, KVM_GET_XSAVE2, state->xsave);
+ return false;
}
-struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
+static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
+ struct kvm_x86_state *state)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- struct kvm_msr_list *list;
+ int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);
+
+ if (size) {
+ state->xsave = malloc(size);
+ vcpu_xsave2_get(vcpu, state->xsave);
+ } else {
+ state->xsave = malloc(sizeof(struct kvm_xsave));
+ vcpu_xsave_get(vcpu, state->xsave);
+ }
+}
+
+struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
+{
+ const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
struct kvm_x86_state *state;
- int nmsrs, r, i;
+ int i;
+
static int nested_size = -1;
if (nested_size == -1) {
@@ -1115,113 +949,57 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
* kernel with KVM_RUN. Complete IO prior to migrating state
* to a new VM.
*/
- vcpu_run_complete_io(vm, vcpuid);
-
- nmsrs = kvm_get_num_msrs(vm);
- list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
- list->nmsrs = nmsrs;
- r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
- r);
-
- state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0]));
- r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i",
- r);
-
- r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i",
- r);
-
- r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
- r);
-
- r = vcpu_save_xsave_state(vm, vcpu, state);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
- r);
-
- if (kvm_check_cap(KVM_CAP_XCRS)) {
- r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
- r);
- }
+ vcpu_run_complete_io(vcpu);
+
+ state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));
+
+ vcpu_events_get(vcpu, &state->events);
+ vcpu_mp_state_get(vcpu, &state->mp_state);
+ vcpu_regs_get(vcpu, &state->regs);
+ vcpu_save_xsave_state(vcpu, state);
- r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
- r);
+ if (kvm_has_cap(KVM_CAP_XCRS))
+ vcpu_xcrs_get(vcpu, &state->xcrs);
+
+ vcpu_sregs_get(vcpu, &state->sregs);
if (nested_size) {
state->nested.size = sizeof(state->nested_);
- r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
- r);
+
+ vcpu_nested_state_get(vcpu, &state->nested);
TEST_ASSERT(state->nested.size <= nested_size,
"Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
state->nested.size, nested_size);
- } else
+ } else {
state->nested.size = 0;
+ }
- state->msrs.nmsrs = nmsrs;
- for (i = 0; i < nmsrs; i++)
- state->msrs.entries[i].index = list->indices[i];
- r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
- TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
- r, r == nmsrs ? -1 : list->indices[r]);
+ state->msrs.nmsrs = msr_list->nmsrs;
+ for (i = 0; i < msr_list->nmsrs; i++)
+ state->msrs.entries[i].index = msr_list->indices[i];
+ vcpu_msrs_get(vcpu, &state->msrs);
- r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i",
- r);
+ vcpu_debugregs_get(vcpu, &state->debugregs);
- free(list);
return state;
}
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state)
+void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- int r;
-
- r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
- r);
-
- r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
- TEST_ASSERT(r == state->msrs.nmsrs,
- "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
- r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);
-
- if (kvm_check_cap(KVM_CAP_XCRS)) {
- r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
- r);
- }
-
- r = ioctl(vcpu->fd, KVM_SET_XSAVE, state->xsave);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
- r);
-
- r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
- r);
+ vcpu_sregs_set(vcpu, &state->sregs);
+ vcpu_msrs_set(vcpu, &state->msrs);
- r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i",
- r);
+ if (kvm_has_cap(KVM_CAP_XCRS))
+ vcpu_xcrs_set(vcpu, &state->xcrs);
- r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i",
- r);
+ vcpu_xsave_set(vcpu, state->xsave);
+ vcpu_events_set(vcpu, &state->events);
+ vcpu_mp_state_set(vcpu, &state->mp_state);
+ vcpu_debugregs_set(vcpu, &state->debugregs);
+ vcpu_regs_set(vcpu, &state->regs);
- r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
- r);
-
- if (state->nested.size) {
- r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
- r);
- }
+ if (state->nested.size)
+ vcpu_nested_state_set(vcpu, &state->nested);
}
void kvm_x86_state_cleanup(struct kvm_x86_state *state)
@@ -1233,15 +1011,9 @@ void kvm_x86_state_cleanup(struct kvm_x86_state *state)
static bool cpu_vendor_string_is(const char *vendor)
{
const uint32_t *chunk = (const uint32_t *)vendor;
- int eax, ebx, ecx, edx;
- const int leaf = 0;
-
- __asm__ __volatile__(
- "cpuid"
- : /* output */ "=a"(eax), "=b"(ebx),
- "=c"(ecx), "=d"(edx)
- : /* input */ "0"(leaf), "2"(0));
+ uint32_t eax, ebx, ecx, edx;
+ cpuid(0, &eax, &ebx, &ecx, &edx);
return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}
@@ -1258,19 +1030,9 @@ bool is_amd_cpu(void)
return cpu_vendor_string_is("AuthenticAMD");
}
-uint32_t kvm_get_cpuid_max_basic(void)
-{
- return kvm_get_supported_cpuid_entry(0)->eax;
-}
-
-uint32_t kvm_get_cpuid_max_extended(void)
-{
- return kvm_get_supported_cpuid_entry(0x80000000)->eax;
-}
-
void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
- struct kvm_cpuid_entry2 *entry;
+ const struct kvm_cpuid_entry2 *entry;
bool pae;
/* SDM 4.1.4 */
@@ -1316,6 +1078,20 @@ static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
e->offset2 = addr >> 32;
}
+
+static bool kvm_fixup_exception(struct ex_regs *regs)
+{
+ if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
+ return false;
+
+ if (regs->vector == DE_VECTOR)
+ return false;
+
+ regs->rip = regs->r11;
+ regs->r9 = regs->vector;
+ return true;
+}
+
void kvm_exit_unexpected_vector(uint32_t value)
{
ucall(UCALL_UNHANDLED, 1, value);
@@ -1331,6 +1107,9 @@ void route_exception(struct ex_regs *regs)
return;
}
+ if (kvm_fixup_exception(regs))
+ return;
+
kvm_exit_unexpected_vector(regs->vector);
}
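/*
 * Sketch (not part of this patch): the fixup contract above is armed purely
 * through guest registers -- r9 = KVM_EXCEPTION_MAGIC, r10 = address of the
 * possibly-faulting instruction, r11 = resume address.  On a vectored event,
 * kvm_fixup_exception() rewinds RIP to r11 and reports the vector in r9.
 * The macro below is illustrative, not the series' actual helper; @vector
 * must be a uint8_t lvalue.
 */
#define GUEST_RUN_SAFE(insn, vector)					\
	asm volatile("mov %[magic], %%r9\n\t"				\
		     "lea 1f(%%rip), %%r10\n\t"				\
		     "lea 2f(%%rip), %%r11\n\t"				\
		     "1: " insn "\n\t"					\
		     "xor %%r9, %%r9\n\t"				\
		     "2: mov %%r9b, %[vec]\n\t"				\
		     : [vec] "=qm"(vector)				\
		     : [magic] "r"((uint64_t)KVM_EXCEPTION_MAGIC)	\
		     : "r9", "r10", "r11")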
@@ -1347,17 +1126,18 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
DEFAULT_CODE_SELECTOR);
}
-void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
+ struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
- vcpu_sregs_get(vm, vcpuid, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.idt.base = vm->idt;
sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
sregs.gdt.base = vm->gdt;
sregs.gdt.limit = getpagesize() - 1;
kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
- vcpu_sregs_set(vm, vcpuid, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}
@@ -1369,11 +1149,11 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
handlers[vector] = (vm_vaddr_t)handler;
}
-void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
+void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- if (get_ucall(vm, vcpuid, &uc) == UCALL_UNHANDLED) {
+ if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
uint64_t vector = uc.args[0];
TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
@@ -1381,16 +1161,15 @@ void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
}
}
-struct kvm_cpuid_entry2 *get_cpuid(struct kvm_cpuid2 *cpuid, uint32_t function,
- uint32_t index)
+const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
+ uint32_t function, uint32_t index)
{
int i;
for (i = 0; i < cpuid->nent; i++) {
- struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];
-
- if (cur->function == function && cur->index == index)
- return cur;
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index)
+ return &cpuid->entries[i];
}
TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index);
@@ -1398,24 +1177,6 @@ struct kvm_cpuid_entry2 *get_cpuid(struct kvm_cpuid2 *cpuid, uint32_t function,
return NULL;
}
-bool set_cpuid(struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 *ent)
-{
- int i;
-
- for (i = 0; i < cpuid->nent; i++) {
- struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];
-
- if (cur->function != ent->function || cur->index != ent->index)
- continue;
-
- memcpy(cur, ent, sizeof(struct kvm_cpuid_entry2));
- return true;
- }
-
- return false;
-}
-
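/*
 * Sketch (not part of this patch): with set_cpuid() gone, callers would
 * instead edit the vCPU's cached CPUID and re-push it.  The helper names
 * vcpu_get_cpuid_entry()/vcpu_set_cpuid() are assumptions about this
 * series' replacement API.
 */
static void tweak_cpuid_example(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, 0x1);

	entry->ecx &= ~(1u << 5);	/* illustrative: clear CPUID.1:ECX.VMX */
	vcpu_set_cpuid(vcpu);		/* push the modified set back to KVM */
}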
uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
uint64_t a3)
{
@@ -1423,43 +1184,38 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
asm volatile("vmcall"
: "=a"(r)
- : "b"(a0), "c"(a1), "d"(a2), "S"(a3));
+ : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
return r;
}
-struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
+const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
{
static struct kvm_cpuid2 *cpuid;
- int ret;
int kvm_fd;
if (cpuid)
return cpuid;
- cpuid = allocate_kvm_cpuid2();
+ cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
kvm_fd = open_kvm_dev_path_or_exit();
- ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
- TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n",
- ret, errno);
+ kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
close(kvm_fd);
return cpuid;
}
-void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
{
static struct kvm_cpuid2 *cpuid_full;
- struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+ const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
int i, nent = 0;
if (!cpuid_full) {
cpuid_sys = kvm_get_supported_cpuid();
cpuid_hv = kvm_get_supported_hv_cpuid();
- cpuid_full = malloc(sizeof(*cpuid_full) +
- (cpuid_sys->nent + cpuid_hv->nent) *
- sizeof(struct kvm_cpuid_entry2));
+ cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
if (!cpuid_full) {
perror("malloc");
abort();
@@ -1479,16 +1235,14 @@ void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
cpuid_full->nent = nent + cpuid_hv->nent;
}
- vcpu_set_cpuid(vm, vcpuid, cpuid_full);
+ vcpu_init_cpuid(vcpu, cpuid_full);
}
-struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
{
- static struct kvm_cpuid2 *cpuid;
+ struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
- cpuid = allocate_kvm_cpuid2();
-
- vcpu_ioctl(vm, vcpuid, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
return cpuid;
}
@@ -1511,9 +1265,7 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
/* Before family 17h, the HyperTransport area is just below 1T. */
ht_gfn = (1 << 28) - num_ht_pages;
- eax = 1;
- ecx = 0;
- cpuid(&eax, &ebx, &ecx, &edx);
+ cpuid(1, &eax, &ebx, &ecx, &edx);
if (x86_family(eax) < 0x17)
goto done;
@@ -1522,18 +1274,15 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
* reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
* the old conservative value if MAXPHYADDR is not enumerated.
*/
- eax = 0x80000000;
- cpuid(&eax, &ebx, &ecx, &edx);
+ cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
max_ext_leaf = eax;
if (max_ext_leaf < 0x80000008)
goto done;
- eax = 0x80000008;
- cpuid(&eax, &ebx, &ecx, &edx);
+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
if (max_ext_leaf >= 0x8000001f) {
- eax = 0x8000001f;
- cpuid(&eax, &ebx, &ecx, &edx);
+ cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
max_pfn >>= (ebx >> 6) & 0x3f;
}
@@ -1541,3 +1290,24 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
done:
return min(max_gfn, ht_gfn - 1);
}
+
+/* Returns true if kvm_intel was loaded with unrestricted_guest=1. */
+bool vm_is_unrestricted_guest(struct kvm_vm *vm)
+{
+ char val = 'N';
+ size_t count;
+ FILE *f;
+
+ /* Ensure that a KVM vendor-specific module is loaded. */
+ if (vm == NULL)
+ close(open_kvm_dev_path_or_exit());
+
+ f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
+ if (f) {
+ count = fread(&val, sizeof(char), 1, f);
+ TEST_ASSERT(count == 1, "Unable to read from param file.");
+ fclose(f);
+ }
+
+ return val == 'Y';
+}
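/*
 * Sketch (not part of this patch): a typical save/restore cycle with the
 * vcpu-first helpers above; vcpu_save_state() is the counterpart of
 * vcpu_load_state(), and the VM re-creation step is elided.
 */
static void save_restore_once(struct kvm_vcpu *vcpu)
{
	struct kvm_x86_state *state;

	vcpu_run(vcpu);

	state = vcpu_save_state(vcpu);	/* events, regs, xsave, MSRs, ... */
	/* ... destroy and re-create the VM/vCPU here in a real test ... */
	vcpu_load_state(vcpu, state);

	kvm_x86_state_cleanup(state);
}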
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index 736ee4a23df6..6d445886e16c 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -9,7 +9,6 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#include "processor.h"
#include "svm_util.h"
@@ -165,22 +164,6 @@ void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
: "r15", "memory");
}
-bool nested_svm_supported(void)
-{
- struct kvm_cpuid_entry2 *entry =
- kvm_get_supported_cpuid_entry(0x80000001);
-
- return entry->ecx & CPUID_SVM;
-}
-
-void nested_svm_check_supported(void)
-{
- if (!nested_svm_supported()) {
- print_skip("nested SVM not enabled");
- exit(KSFT_SKIP);
- }
-}
-
/*
* Open SEV_DEV_PATH if available, otherwise exit the entire program.
*
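/*
 * Sketch (not part of this patch): with nested_svm_check_supported() gone,
 * each nested SVM test presumably gates itself on the CPUID bit directly;
 * kvm_cpu_has()/X86_FEATURE_SVM are assumed helper names from this series.
 */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));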
diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
index a3489973e290..e5f0f9e0d3ee 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
@@ -24,7 +24,7 @@ void ucall(uint64_t cmd, int nargs, ...)
va_list va;
int i;
- nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+ nargs = min(nargs, UCALL_MAX_ARGS);
va_start(va, nargs);
for (i = 0; i < nargs; ++i)
@@ -35,9 +35,9 @@ void ucall(uint64_t cmd, int nargs, ...)
: : [port] "d" (UCALL_PIO_PORT), "D" (&uc) : "rax", "memory");
}
-uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
+ struct kvm_run *run = vcpu->run;
struct ucall ucall = {};
if (uc)
@@ -46,11 +46,11 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) {
struct kvm_regs regs;
- vcpu_regs_get(vm, vcpu_id, &regs);
- memcpy(&ucall, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi),
+ vcpu_regs_get(vcpu, &regs);
+ memcpy(&ucall, addr_gva2hva(vcpu->vm, (vm_vaddr_t)regs.rdi),
sizeof(ucall));
- vcpu_run_complete_io(vm, vcpu_id);
+ vcpu_run_complete_io(vcpu);
if (uc)
memcpy(uc, &ucall, sizeof(ucall));
}
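/*
 * Sketch (not part of this patch): host-side consumption of the converted
 * ucall API; REPORT_GUEST_ASSERT_1() is used as elsewhere in this series.
 */
static void run_guest_until_done(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:	/* uc.args[1] = stage from GUEST_SYNC() */
			continue;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT_1(uc, "val = %lu");
			/* not reached */
		case UCALL_DONE:
			return;
		default:
			TEST_FAIL("Unknown ucall 0x%lx", uc.cmd);
		}
	}
}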
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d089d8b850b5..80a568c439b8 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -7,7 +7,6 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "../kvm_util_internal.h"
#include "processor.h"
#include "vmx.h"
@@ -43,16 +42,12 @@ struct eptPageTablePointer {
uint64_t address:40;
uint64_t reserved_63_52:12;
};
-int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
+int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
{
uint16_t evmcs_ver;
- struct kvm_enable_cap enable_evmcs_cap = {
- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
- .args[0] = (unsigned long)&evmcs_ver
- };
-
- vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+ (unsigned long)&evmcs_ver);
/* KVM should return supported EVMCS version range */
TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
@@ -198,6 +193,16 @@ bool load_vmcs(struct vmx_pages *vmx)
return true;
}
+static bool ept_vpid_cap_supported(uint64_t mask)
+{
+ return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
+}
+
+bool ept_1g_pages_supported(void)
+{
+ return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES);
+}
+
/*
* Initialize the control fields to the most basic settings possible.
*/
@@ -215,7 +220,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
struct eptPageTablePointer eptp = {
.memory_type = VMX_BASIC_MEM_TYPE_WB,
.page_walk_length = 3, /* + 1 */
- .ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
+ .ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
};
@@ -377,95 +382,93 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
init_vmcs_guest_state(guest_rip, guest_rsp);
}
-bool nested_vmx_supported(void)
-{
- struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
-
- return entry->ecx & CPUID_VMX;
-}
-
-void nested_vmx_check_supported(void)
+static void nested_create_pte(struct kvm_vm *vm,
+ struct eptPageTableEntry *pte,
+ uint64_t nested_paddr,
+ uint64_t paddr,
+ int current_level,
+ int target_level)
{
- if (!nested_vmx_supported()) {
- print_skip("nested VMX not enabled");
- exit(KSFT_SKIP);
+ if (!pte->readable) {
+ pte->writable = true;
+ pte->readable = true;
+ pte->executable = true;
+ pte->page_size = (current_level == target_level);
+ if (pte->page_size)
+ pte->address = paddr >> vm->page_shift;
+ else
+ pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
+ } else {
+ /*
+ * Entry already present. Assert that the caller doesn't want
+ * a hugepage at this level, and that there isn't a hugepage at
+ * this level.
+ */
+ TEST_ASSERT(current_level != target_level,
+ "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
+ current_level, nested_paddr);
+ TEST_ASSERT(!pte->page_size,
+ "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
+ current_level, nested_paddr);
}
}
-void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr)
+
+void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+ uint64_t nested_paddr, uint64_t paddr, int target_level)
{
- uint16_t index[4];
- struct eptPageTableEntry *pml4e;
+ const uint64_t page_size = PG_LEVEL_SIZE(target_level);
+ struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
+ uint16_t index;
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
- TEST_ASSERT((nested_paddr % vm->page_size) == 0,
+ TEST_ASSERT((nested_paddr >> 48) == 0,
+ "Nested physical address 0x%lx requires 5-level paging",
+ nested_paddr);
+ TEST_ASSERT((nested_paddr % page_size) == 0,
"Nested physical address not on page boundary,\n"
- " nested_paddr: 0x%lx vm->page_size: 0x%x",
- nested_paddr, vm->page_size);
+ " nested_paddr: 0x%lx page_size: 0x%lx",
+ nested_paddr, page_size);
TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
" nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
- TEST_ASSERT((paddr % vm->page_size) == 0,
+ TEST_ASSERT((paddr % page_size) == 0,
"Physical address not on page boundary,\n"
- " paddr: 0x%lx vm->page_size: 0x%x",
- paddr, vm->page_size);
+ " paddr: 0x%lx page_size: 0x%lx",
+ paddr, page_size);
TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
"Physical address beyond beyond maximum supported,\n"
" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
paddr, vm->max_gfn, vm->page_size);
- index[0] = (nested_paddr >> 12) & 0x1ffu;
- index[1] = (nested_paddr >> 21) & 0x1ffu;
- index[2] = (nested_paddr >> 30) & 0x1ffu;
- index[3] = (nested_paddr >> 39) & 0x1ffu;
-
- /* Allocate page directory pointer table if not present. */
- pml4e = vmx->eptp_hva;
- if (!pml4e[index[3]].readable) {
- pml4e[index[3]].address = vm_alloc_page_table(vm) >> vm->page_shift;
- pml4e[index[3]].writable = true;
- pml4e[index[3]].readable = true;
- pml4e[index[3]].executable = true;
- }
+ for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
+ index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
+ pte = &pt[index];
- /* Allocate page directory table if not present. */
- struct eptPageTableEntry *pdpe;
- pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
- if (!pdpe[index[2]].readable) {
- pdpe[index[2]].address = vm_alloc_page_table(vm) >> vm->page_shift;
- pdpe[index[2]].writable = true;
- pdpe[index[2]].readable = true;
- pdpe[index[2]].executable = true;
- }
+ nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);
- /* Allocate page table if not present. */
- struct eptPageTableEntry *pde;
- pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
- if (!pde[index[1]].readable) {
- pde[index[1]].address = vm_alloc_page_table(vm) >> vm->page_shift;
- pde[index[1]].writable = true;
- pde[index[1]].readable = true;
- pde[index[1]].executable = true;
- }
+ if (pte->page_size)
+ break;
- /* Fill in page table entry. */
- struct eptPageTableEntry *pte;
- pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
- pte[index[0]].address = paddr >> vm->page_shift;
- pte[index[0]].writable = true;
- pte[index[0]].readable = true;
- pte[index[0]].executable = true;
+ pt = addr_gpa2hva(vm, pte->address * vm->page_size);
+ }
/*
* For now mark these as accessed and dirty because the only
* testcase we have needs that. Can be reconsidered later.
*/
- pte[index[0]].accessed = true;
- pte[index[0]].dirty = true;
+ pte->accessed = true;
+ pte->dirty = true;
+}
+
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+ uint64_t nested_paddr, uint64_t paddr)
+{
+ __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
}
/*
@@ -476,7 +479,7 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
* nested_paddr - Nested guest physical address to map
* paddr - VM Physical Address
* size - The size of the range to map
- * eptp_memslot - Memory region slot for new virtual translation tables
+ * level - The level at which to map the range
*
* Output Args: None
*
@@ -485,22 +488,29 @@ void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
* Within the VM given by vm, creates a nested guest translation for the
* page range starting at nested_paddr to the page range starting at paddr.
*/
-void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
- uint64_t nested_paddr, uint64_t paddr, uint64_t size)
+void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+ uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+ int level)
{
- size_t page_size = vm->page_size;
+ size_t page_size = PG_LEVEL_SIZE(level);
size_t npages = size / page_size;
TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
while (npages--) {
- nested_pg_map(vmx, vm, nested_paddr, paddr);
+ __nested_pg_map(vmx, vm, nested_paddr, paddr, level);
nested_paddr += page_size;
paddr += page_size;
}
}
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+ uint64_t nested_paddr, uint64_t paddr, uint64_t size)
+{
+ __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
+}
+
/* Prepare an identity extended page table that maps all the
* physical pages in VM.
*/
@@ -525,6 +535,13 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
}
}
+/* Identity map a region with 1GiB Pages. */
+void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
+ uint64_t addr, uint64_t size)
+{
+ __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
+}
+
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
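/*
 * Sketch (not part of this patch): pairing the new 1GiB mapper with the
 * capability check added above; the GIB constant is local to this example.
 */
#define GIB	(1ULL << 30)

	if (ept_1g_pages_supported())
		nested_identity_map_1g(vmx, vm, 0, 4 * GIB);
	else
		nested_map(vmx, vm, 0, 0, 4 * GIB);	/* fall back to 4K pages */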
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 3875c4b23a04..9a6e4f3ad6b5 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -28,8 +28,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
}
struct vcpu_info {
- struct kvm_vm *vm;
- uint32_t id;
+ struct kvm_vcpu *vcpu;
uint64_t start_gpa;
uint64_t end_gpa;
};
@@ -52,45 +51,45 @@ static void rendezvous_with_boss(void)
}
}
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpu_id)
+static void run_vcpu(struct kvm_vcpu *vcpu)
{
- vcpu_run(vm, vcpu_id);
- ASSERT_EQ(get_ucall(vm, vcpu_id, NULL), UCALL_DONE);
+ vcpu_run(vcpu);
+ ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
}
static void *vcpu_worker(void *data)
{
- struct vcpu_info *vcpu = data;
+ struct vcpu_info *info = data;
+ struct kvm_vcpu *vcpu = info->vcpu;
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
struct kvm_regs regs;
- vcpu_args_set(vm, vcpu->id, 3, vcpu->start_gpa, vcpu->end_gpa,
- vm_get_page_size(vm));
+ vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
/* Snapshot regs before the first run. */
- vcpu_regs_get(vm, vcpu->id, &regs);
+ vcpu_regs_get(vcpu, &regs);
rendezvous_with_boss();
- run_vcpu(vm, vcpu->id);
+ run_vcpu(vcpu);
rendezvous_with_boss();
- vcpu_regs_set(vm, vcpu->id, &regs);
- vcpu_sregs_get(vm, vcpu->id, &sregs);
+ vcpu_regs_set(vcpu, &regs);
+ vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
/* Toggle CR0.WP to trigger a MMU context reset. */
sregs.cr0 ^= X86_CR0_WP;
#endif
- vcpu_sregs_set(vm, vcpu->id, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
rendezvous_with_boss();
- run_vcpu(vm, vcpu->id);
+ run_vcpu(vcpu);
rendezvous_with_boss();
return NULL;
}
-static pthread_t *spawn_workers(struct kvm_vm *vm, uint64_t start_gpa,
- uint64_t end_gpa)
+static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
+ uint64_t start_gpa, uint64_t end_gpa)
{
struct vcpu_info *info;
uint64_t gpa, nr_bytes;
@@ -104,12 +103,11 @@ static pthread_t *spawn_workers(struct kvm_vm *vm, uint64_t start_gpa,
TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");
nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
- ~((uint64_t)vm_get_page_size(vm) - 1);
+ ~((uint64_t)vm->page_size - 1);
TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);
for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
- info[i].vm = vm;
- info[i].id = i;
+ info[i].vcpu = vcpus[i];
info[i].start_gpa = gpa;
info[i].end_gpa = gpa + nr_bytes;
pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
@@ -172,6 +170,7 @@ int main(int argc, char *argv[])
uint64_t max_gpa, gpa, slot_size, max_mem, i;
int max_slots, slot, opt, fd;
bool hugepages = false;
+ struct kvm_vcpu **vcpus;
pthread_t *threads;
struct kvm_vm *vm;
void *mem;
@@ -215,9 +214,12 @@ int main(int argc, char *argv[])
}
}
- vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
+ vcpus = malloc(nr_vcpus * sizeof(*vcpus));
+ TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
+
+ vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
- max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm);
+ max_gpa = vm->max_gfn << vm->page_shift;
	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR < 4gb");
fd = kvm_memfd_alloc(slot_size, hugepages);
@@ -227,7 +229,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");
/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
- for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+ for (i = 0; i < slot_size; i += vm->page_size)
((uint8_t *)mem)[i] = 0xaa;
gpa = 0;
@@ -244,15 +246,18 @@ int main(int argc, char *argv[])
#ifdef __x86_64__
/* Identity map memory in the guest using 1gb pages. */
for (i = 0; i < slot_size; i += size_1gb)
- __virt_pg_map(vm, gpa + i, gpa + i, X86_PAGE_SIZE_1G);
+ __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
- for (i = 0; i < slot_size; i += vm_get_page_size(vm))
+ for (i = 0; i < slot_size; i += vm->page_size)
virt_pg_map(vm, gpa + i, gpa + i);
#endif
}
atomic_set(&rendezvous, nr_vcpus + 1);
- threads = spawn_workers(vm, start_gpa, gpa);
+ threads = spawn_workers(vm, vcpus, start_gpa, gpa);
+
+ free(vcpus);
+ vcpus = NULL;
pr_info("Running with %lugb of guest memory and %u vCPUs\n",
(gpa - start_gpa) / size_1gb, nr_vcpus);
diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
index 1410d0a9141a..6ee7e1dde404 100644
--- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c
+++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c
@@ -38,19 +38,18 @@ static bool run_vcpus = true;
static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
- int ret;
- int vcpu_id = vcpu_args->vcpu_id;
- struct kvm_vm *vm = perf_test_args.vm;
+ struct kvm_vcpu *vcpu = vcpu_args->vcpu;
struct kvm_run *run;
+ int ret;
- run = vcpu_state(vm, vcpu_id);
+ run = vcpu->run;
/* Let the guest access its memory until a stop signal is received */
while (READ_ONCE(run_vcpus)) {
- ret = _vcpu_run(vm, vcpu_id);
+ ret = _vcpu_run(vcpu);
TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
- if (get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC)
+ if (get_ucall(vcpu, NULL) == UCALL_SYNC)
continue;
TEST_ASSERT(false,
@@ -76,7 +75,7 @@ static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay,
* Add the dummy memslot just below the perf_test_util memslot, which is
* at the top of the guest physical address space.
*/
- gpa = perf_test_args.gpa - pages * vm_get_page_size(vm);
+ gpa = perf_test_args.gpa - pages * vm->page_size;
for (i = 0; i < nr_modifications; i++) {
usleep(delay);
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 1727f75e0c2c..44995446d942 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -25,8 +25,6 @@
#include <kvm_util.h>
#include <processor.h>
-#define VCPU_ID 0
-
#define MEM_SIZE ((512U << 20) + 4096)
#define MEM_SIZE_PAGES (MEM_SIZE / 4096)
#define MEM_GPA 0x10000000UL
@@ -90,6 +88,7 @@ static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
struct vm_data {
struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
pthread_t vcpu_thread;
uint32_t nslots;
uint64_t npages;
@@ -127,29 +126,29 @@ static bool verbose;
pr_info(__VA_ARGS__); \
} while (0)
-static void check_mmio_access(struct vm_data *vm, struct kvm_run *run)
+static void check_mmio_access(struct vm_data *data, struct kvm_run *run)
{
- TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+ TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit");
TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
TEST_ASSERT(run->mmio.len == 8,
"Unexpected exit mmio size = %u", run->mmio.len);
- TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
- run->mmio.phys_addr <= vm->mmio_gpa_max,
+ TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min &&
+ run->mmio.phys_addr <= data->mmio_gpa_max,
"Unexpected exit mmio address = 0x%llx",
run->mmio.phys_addr);
}
-static void *vcpu_worker(void *data)
+static void *vcpu_worker(void *__data)
{
- struct vm_data *vm = data;
- struct kvm_run *run;
+ struct vm_data *data = __data;
+ struct kvm_vcpu *vcpu = data->vcpu;
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
- run = vcpu_state(vm->vm, VCPU_ID);
while (1) {
- vcpu_run(vm->vm, VCPU_ID);
+ vcpu_run(vcpu);
- switch (get_ucall(vm->vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(uc.args[1] == 0,
"Unexpected sync ucall, got %lx",
@@ -158,14 +157,12 @@ static void *vcpu_worker(void *data)
continue;
case UCALL_NONE:
if (run->exit_reason == KVM_EXIT_MMIO)
- check_mmio_access(vm, run);
+ check_mmio_access(data, run);
else
goto done;
break;
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld, val = %lu",
- (const char *)uc.args[0],
- __FILE__, uc.args[1], uc.args[2]);
+ REPORT_GUEST_ASSERT_1(uc, "val = %lu");
break;
case UCALL_DONE:
goto done;
@@ -238,6 +235,7 @@ static struct vm_data *alloc_vm(void)
TEST_ASSERT(data, "malloc(vmdata) failed");
data->vm = NULL;
+ data->vcpu = NULL;
data->hva_slots = NULL;
return data;
@@ -278,7 +276,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
TEST_ASSERT(data->hva_slots, "malloc() fail");
- data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+ data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
ucall_init(data->vm, NULL);
pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index 4158da0da2bb..a54d4d05a058 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -20,8 +20,6 @@
#include "processor.h"
#include "test_util.h"
-#define VCPU_ID 0
-
static __thread volatile struct rseq __rseq = {
.cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
};
@@ -82,8 +80,9 @@ static int next_cpu(int cpu)
return cpu;
}
-static void *migration_worker(void *ign)
+static void *migration_worker(void *__rseq_tid)
{
+ pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
cpu_set_t allowed_mask;
int r, i, cpu;
@@ -106,7 +105,7 @@ static void *migration_worker(void *ign)
* stable, i.e. while changing affinity is in-progress.
*/
smp_wmb();
- r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+ r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
errno, strerror(errno));
smp_wmb();
@@ -173,12 +172,11 @@ static void *migration_worker(void *ign)
return NULL;
}
-static int calc_min_max_cpu(void)
+static void calc_min_max_cpu(void)
{
int i, cnt, nproc;
- if (CPU_COUNT(&possible_mask) < 2)
- return -EINVAL;
+ TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);
/*
* CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
@@ -200,13 +198,15 @@ static int calc_min_max_cpu(void)
cnt++;
}
- return (cnt < 2) ? -EINVAL : 0;
+ __TEST_REQUIRE(cnt >= 2,
+ "Only one usable CPU, task migration not possible");
}
int main(int argc, char *argv[])
{
int r, i, snapshot;
struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
u32 cpu, rseq_cpu;
/* Tell stdout not to buffer its content */
@@ -216,10 +216,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
strerror(errno));
- if (calc_min_max_cpu()) {
- print_skip("Only one usable CPU, task migration not possible");
- exit(KSFT_SKIP);
- }
+ calc_min_max_cpu();
sys_rseq(0);
@@ -228,14 +225,15 @@ int main(int argc, char *argv[])
* GUEST_SYNC, while concurrently migrating the process by setting its
* CPU affinity.
*/
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
ucall_init(vm, NULL);
- pthread_create(&migration_thread, NULL, migration_worker, 0);
+ pthread_create(&migration_thread, NULL, migration_worker,
+ (void *)(unsigned long)gettid());
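/*
 * Portability note (sketch, not part of this patch): gettid() only gained
 * a glibc wrapper in 2.30, so older toolchains would need a raw shim:
 */
#include <sys/syscall.h>
#include <unistd.h>

static pid_t sys_gettid(void)
{
	return syscall(SYS_gettid);
}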
for (i = 0; !done; i++) {
- vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
+ vcpu_run(vcpu);
+ TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Guest failed?");
/*
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index b04c2c1b3c30..9113696d5178 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -10,8 +10,11 @@
#include <string.h>
#include <sys/ioctl.h>
+#include <linux/bits.h>
+
#include "test_util.h"
#include "kvm_util.h"
+#include "kselftest.h"
enum mop_target {
LOGICAL,
@@ -96,21 +99,18 @@ static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc desc)
return ksmo;
}
-/* vcpu dummy id signifying that vm instead of vcpu ioctl is to occur */
-const uint32_t VM_VCPU_ID = (uint32_t)-1;
-
-struct test_vcpu {
+struct test_info {
struct kvm_vm *vm;
- uint32_t id;
+ struct kvm_vcpu *vcpu;
};
#define PRINT_MEMOP false
-static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo)
+static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
if (!PRINT_MEMOP)
return;
- if (vcpu_id == VM_VCPU_ID)
+ if (!vcpu)
printf("vm memop(");
else
printf("vcpu memop(");
@@ -145,25 +145,29 @@ static void print_memop(uint32_t vcpu_id, const struct kvm_s390_mem_op *ksmo)
puts(")");
}
-static void memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
{
- if (vcpu.id == VM_VCPU_ID)
- vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+ struct kvm_vcpu *vcpu = info.vcpu;
+
+ if (!vcpu)
+ vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
else
- vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+ vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}
-static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
+static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo)
{
- if (vcpu.id == VM_VCPU_ID)
- return _vm_ioctl(vcpu.vm, KVM_S390_MEM_OP, ksmo);
+ struct kvm_vcpu *vcpu = info.vcpu;
+
+ if (!vcpu)
+ return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
else
- return _vcpu_ioctl(vcpu.vm, vcpu.id, KVM_S390_MEM_OP, ksmo);
+ return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}
-#define MEMOP(err, vcpu_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
+#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...) \
({ \
- struct test_vcpu __vcpu = (vcpu_p); \
+ struct test_info __info = (info_p); \
struct mop_desc __desc = { \
.target = (mop_target_p), \
.mode = (access_mode_p), \
@@ -175,13 +179,13 @@ static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
\
if (__desc._gaddr_v) { \
if (__desc.target == ABSOLUTE) \
- __desc.gaddr = addr_gva2gpa(__vcpu.vm, __desc.gaddr_v); \
+ __desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v); \
else \
__desc.gaddr = __desc.gaddr_v; \
} \
__ksmo = ksmo_from_desc(__desc); \
- print_memop(__vcpu.id, &__ksmo); \
- err##memop_ioctl(__vcpu, &__ksmo); \
+ print_memop(__info.vcpu, &__ksmo); \
+ err##memop_ioctl(__info, &__ksmo); \
})
#define MOP(...) MEMOP(, __VA_ARGS__)
@@ -194,10 +198,10 @@ static int err_memop_ioctl(struct test_vcpu vcpu, struct kvm_s390_mem_op *ksmo)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
+#define INJECT .f_inject = 1
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
-#define VCPU_ID 1
#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
@@ -209,21 +213,22 @@ static uint8_t mem2[65536];
struct test_default {
struct kvm_vm *kvm_vm;
- struct test_vcpu vm;
- struct test_vcpu vcpu;
+ struct test_info vm;
+ struct test_info vcpu;
struct kvm_run *run;
int size;
};
static struct test_default test_default_init(void *guest_code)
{
+ struct kvm_vcpu *vcpu;
struct test_default t;
t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
- t.kvm_vm = vm_create_default(VCPU_ID, 0, guest_code);
- t.vm = (struct test_vcpu) { t.kvm_vm, VM_VCPU_ID };
- t.vcpu = (struct test_vcpu) { t.kvm_vm, VCPU_ID };
- t.run = vcpu_state(t.kvm_vm, VCPU_ID);
+ t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ t.vm = (struct test_info) { t.kvm_vm, NULL };
+ t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
+ t.run = vcpu->run;
return t;
}
@@ -238,14 +243,15 @@ enum stage {
STAGE_COPIED,
};
-#define HOST_SYNC(vcpu_p, stage) \
+#define HOST_SYNC(info_p, stage) \
({ \
- struct test_vcpu __vcpu = (vcpu_p); \
+ struct test_info __info = (info_p); \
+ struct kvm_vcpu *__vcpu = __info.vcpu; \
struct ucall uc; \
int __stage = (stage); \
\
- vcpu_run(__vcpu.vm, __vcpu.id); \
- get_ucall(__vcpu.vm, __vcpu.id, &uc); \
+ vcpu_run(__vcpu); \
+ get_ucall(__vcpu, &uc); \
ASSERT_EQ(uc.cmd, UCALL_SYNC); \
ASSERT_EQ(uc.args[1], __stage); \
}) \
@@ -264,7 +270,7 @@ static void prepare_mem12(void)
#define DEFAULT_WRITE_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
({ \
- struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+ struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
enum mop_target __target = (mop_target_p); \
uint32_t __size = (size); \
\
@@ -279,7 +285,7 @@ static void prepare_mem12(void)
#define DEFAULT_READ(copy_cpu, mop_cpu, mop_target_p, size, ...) \
({ \
- struct test_vcpu __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
+ struct test_info __copy_cpu = (copy_cpu), __mop_cpu = (mop_cpu); \
enum mop_target __target = (mop_target_p); \
uint32_t __size = (size); \
\
@@ -430,9 +436,18 @@ static void test_copy_key_fetch_prot(void)
TEST_ASSERT(rv == 4, "Should result in protection exception"); \
})
+static void guest_error_key(void)
+{
+ GUEST_SYNC(STAGE_INITED);
+ set_storage_key_range(mem1, PAGE_SIZE, 0x18);
+ set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
+ GUEST_SYNC(STAGE_SKEYS_SET);
+ GUEST_SYNC(STAGE_IDLED);
+}
+
static void test_errors_key(void)
{
- struct test_default t = test_default_init(guest_copy_key_fetch_prot);
+ struct test_default t = test_default_init(guest_error_key);
HOST_SYNC(t.vcpu, STAGE_INITED);
HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
@@ -446,6 +461,37 @@ static void test_errors_key(void)
kvm_vm_free(t.kvm_vm);
}
+static void test_termination(void)
+{
+ struct test_default t = test_default_init(guest_error_key);
+ uint64_t prefix;
+ uint64_t teid;
+ uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
+ uint64_t psw[2];
+
+ HOST_SYNC(t.vcpu, STAGE_INITED);
+ HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
+
+ /* vcpu, mismatching keys after first page */
+ ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
+ /*
+	 * The memop injected a program exception whose Translation-Exception
+	 * Identification (TEID) the test needs to check. The guest must run
+	 * so that the TEID can be read from guest memory. Point the guest's
+	 * program-new PSW at the current PSW so guest state is not clobbered.
+ */
+ prefix = t.run->s.regs.prefix;
+ psw[0] = t.run->psw_mask;
+ psw[1] = t.run->psw_addr;
+ MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
+ HOST_SYNC(t.vcpu, STAGE_IDLED);
+ MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
+ /* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
+ ASSERT_EQ(teid & teid_mask, 0);
+
+ kvm_vm_free(t.kvm_vm);
+}
+
static void test_errors_key_storage_prot_override(void)
{
struct test_default t = test_default_init(guest_copy_key_fetch_prot);
@@ -580,34 +626,34 @@ static void guest_idle(void)
GUEST_SYNC(STAGE_IDLED);
}
-static void _test_errors_common(struct test_vcpu vcpu, enum mop_target target, int size)
+static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
int rv;
/* Bad size: */
- rv = ERR_MOP(vcpu, target, WRITE, mem1, -1, GADDR_V(mem1));
+ rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");
/* Zero size: */
- rv = ERR_MOP(vcpu, target, WRITE, mem1, 0, GADDR_V(mem1));
+ rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
"ioctl allows 0 as size");
/* Bad flags: */
- rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
+ rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");
/* Bad guest address: */
- rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
+ rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory access");
/* Bad host address: */
- rv = ERR_MOP(vcpu, target, WRITE, 0, size, GADDR_V(mem1));
+ rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
TEST_ASSERT(rv == -1 && errno == EFAULT,
"ioctl does not report bad host memory address");
/* Bad key: */
- rv = ERR_MOP(vcpu, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
+ rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}
@@ -648,33 +694,89 @@ static void test_errors(void)
kvm_vm_free(t.kvm_vm);
}
+struct testdef {
+ const char *name;
+ void (*test)(void);
+ int extension;
+} testlist[] = {
+ {
+ .name = "simple copy",
+ .test = test_copy,
+ },
+ {
+ .name = "generic error checks",
+ .test = test_errors,
+ },
+ {
+ .name = "copy with storage keys",
+ .test = test_copy_key,
+ .extension = 1,
+ },
+ {
+ .name = "copy with key storage protection override",
+ .test = test_copy_key_storage_prot_override,
+ .extension = 1,
+ },
+ {
+ .name = "copy with key fetch protection",
+ .test = test_copy_key_fetch_prot,
+ .extension = 1,
+ },
+ {
+ .name = "copy with key fetch protection override",
+ .test = test_copy_key_fetch_prot_override,
+ .extension = 1,
+ },
+ {
+ .name = "error checks with key",
+ .test = test_errors_key,
+ .extension = 1,
+ },
+ {
+ .name = "termination",
+ .test = test_termination,
+ .extension = 1,
+ },
+ {
+ .name = "error checks with key storage protection override",
+ .test = test_errors_key_storage_prot_override,
+ .extension = 1,
+ },
+ {
+ .name = "error checks without key fetch prot override",
+ .test = test_errors_key_fetch_prot_override_not_enabled,
+ .extension = 1,
+ },
+ {
+ .name = "error checks with key fetch prot override",
+ .test = test_errors_key_fetch_prot_override_enabled,
+ .extension = 1,
+ },
+};
+
int main(int argc, char *argv[])
{
- int memop_cap, extension_cap;
+ int extension_cap, idx;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
- memop_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP);
- extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
- if (!memop_cap) {
- print_skip("CAP_S390_MEM_OP not supported");
- exit(KSFT_SKIP);
- }
+ ksft_print_header();
+
+ ksft_set_plan(ARRAY_SIZE(testlist));
- test_copy();
- if (extension_cap > 0) {
- test_copy_key();
- test_copy_key_storage_prot_override();
- test_copy_key_fetch_prot();
- test_copy_key_fetch_prot_override();
- test_errors_key();
- test_errors_key_storage_prot_override();
- test_errors_key_fetch_prot_override_not_enabled();
- test_errors_key_fetch_prot_override_enabled();
- } else {
- print_skip("storage key memop extension not supported");
+ extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);
+ for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
+ if (extension_cap >= testlist[idx].extension) {
+ testlist[idx].test();
+ ksft_test_result_pass("%s\n", testlist[idx].name);
+ } else {
+ ksft_test_result_skip("%s - extension level %d not supported\n",
+ testlist[idx].name,
+ testlist[idx].extension);
+ }
}
- test_errors();
- return 0;
+ ksft_finished(); /* Print results and exit() accordingly */
}
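/*
 * Sketch (not part of this patch): what a MEMOP() call site in this test
 * looks like after the conversion; guest_copy_key, the key value and the
 * buffers are illustrative.
 */
	struct test_default t = test_default_init(guest_copy_key);

	/* vCPU-scoped write with key 9: dry run via CHECK_ONLY, then for real. */
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(9));
	/* VM-scoped absolute read-back with the same key. */
	MOP(t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(9));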
diff --git a/tools/testing/selftests/kvm/s390x/resets.c b/tools/testing/selftests/kvm/s390x/resets.c
index b143db6d8693..19486084eb30 100644
--- a/tools/testing/selftests/kvm/s390x/resets.c
+++ b/tools/testing/selftests/kvm/s390x/resets.c
@@ -12,15 +12,14 @@
#include "test_util.h"
#include "kvm_util.h"
+#include "kselftest.h"
-#define VCPU_ID 3
#define LOCAL_IRQS 32
-struct kvm_s390_irq buf[VCPU_ID + LOCAL_IRQS];
+#define ARBITRARY_NON_ZERO_VCPU_ID 3
+
+struct kvm_s390_irq buf[ARBITRARY_NON_ZERO_VCPU_ID + LOCAL_IRQS];
-struct kvm_vm *vm;
-struct kvm_run *run;
-struct kvm_sync_regs *sync_regs;
static uint8_t regs_null[512];
static void guest_code_initial(void)
@@ -58,25 +57,22 @@ static void guest_code_initial(void)
);
}
-static void test_one_reg(uint64_t id, uint64_t value)
+static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value)
{
- struct kvm_one_reg reg;
uint64_t eval_reg;
- reg.addr = (uintptr_t)&eval_reg;
- reg.id = id;
- vcpu_get_reg(vm, VCPU_ID, &reg);
+ vcpu_get_reg(vcpu, id, &eval_reg);
TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
}
-static void assert_noirq(void)
+static void assert_noirq(struct kvm_vcpu *vcpu)
{
struct kvm_s390_irq_state irq_state;
int irqs;
irq_state.len = sizeof(buf);
irq_state.buf = (unsigned long)buf;
- irqs = _vcpu_ioctl(vm, VCPU_ID, KVM_S390_GET_IRQ_STATE, &irq_state);
+ irqs = __vcpu_ioctl(vcpu, KVM_S390_GET_IRQ_STATE, &irq_state);
/*
* irqs contains the number of retrieved interrupts. Any interrupt
* (notably, the emergency call interrupt we have injected) should
@@ -86,19 +82,20 @@ static void assert_noirq(void)
TEST_ASSERT(!irqs, "IRQ pending");
}
-static void assert_clear(void)
+static void assert_clear(struct kvm_vcpu *vcpu)
{
+ struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
struct kvm_sregs sregs;
struct kvm_regs regs;
struct kvm_fpu fpu;
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
TEST_ASSERT(!memcmp(&regs.gprs, regs_null, sizeof(regs.gprs)), "grs == 0");
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(!memcmp(&sregs.acrs, regs_null, sizeof(sregs.acrs)), "acrs == 0");
- vcpu_fpu_get(vm, VCPU_ID, &fpu);
+ vcpu_fpu_get(vcpu, &fpu);
TEST_ASSERT(!memcmp(&fpu.fprs, regs_null, sizeof(fpu.fprs)), "fprs == 0");
/* sync regs */
@@ -112,8 +109,10 @@ static void assert_clear(void)
"vrs0-15 == 0 (sync_regs)");
}
-static void assert_initial_noclear(void)
+static void assert_initial_noclear(struct kvm_vcpu *vcpu)
{
+ struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
+
TEST_ASSERT(sync_regs->gprs[0] == 0xffff000000000000UL,
"gpr0 == 0xffff000000000000 (sync_regs)");
TEST_ASSERT(sync_regs->gprs[1] == 0x0000555500000000UL,
@@ -127,13 +126,14 @@ static void assert_initial_noclear(void)
TEST_ASSERT(sync_regs->acrs[9] == 1, "ar9 == 1 (sync_regs)");
}
-static void assert_initial(void)
+static void assert_initial(struct kvm_vcpu *vcpu)
{
+ struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
struct kvm_sregs sregs;
struct kvm_fpu fpu;
/* KVM_GET_SREGS */
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(sregs.crs[0] == 0xE0UL, "cr0 == 0xE0 (KVM_GET_SREGS)");
TEST_ASSERT(sregs.crs[14] == 0xC2000000UL,
"cr14 == 0xC2000000 (KVM_GET_SREGS)");
@@ -156,36 +156,38 @@ static void assert_initial(void)
TEST_ASSERT(sync_regs->gbea == 1, "gbea == 1 (sync_regs)");
/* kvm_run */
- TEST_ASSERT(run->psw_addr == 0, "psw_addr == 0 (kvm_run)");
- TEST_ASSERT(run->psw_mask == 0, "psw_mask == 0 (kvm_run)");
+ TEST_ASSERT(vcpu->run->psw_addr == 0, "psw_addr == 0 (kvm_run)");
+ TEST_ASSERT(vcpu->run->psw_mask == 0, "psw_mask == 0 (kvm_run)");
- vcpu_fpu_get(vm, VCPU_ID, &fpu);
+ vcpu_fpu_get(vcpu, &fpu);
TEST_ASSERT(!fpu.fpc, "fpc == 0");
- test_one_reg(KVM_REG_S390_GBEA, 1);
- test_one_reg(KVM_REG_S390_PP, 0);
- test_one_reg(KVM_REG_S390_TODPR, 0);
- test_one_reg(KVM_REG_S390_CPU_TIMER, 0);
- test_one_reg(KVM_REG_S390_CLOCK_COMP, 0);
+ test_one_reg(vcpu, KVM_REG_S390_GBEA, 1);
+ test_one_reg(vcpu, KVM_REG_S390_PP, 0);
+ test_one_reg(vcpu, KVM_REG_S390_TODPR, 0);
+ test_one_reg(vcpu, KVM_REG_S390_CPU_TIMER, 0);
+ test_one_reg(vcpu, KVM_REG_S390_CLOCK_COMP, 0);
}
-static void assert_normal_noclear(void)
+static void assert_normal_noclear(struct kvm_vcpu *vcpu)
{
+ struct kvm_sync_regs *sync_regs = &vcpu->run->s.regs;
+
TEST_ASSERT(sync_regs->crs[2] == 0x10, "cr2 == 10 (sync_regs)");
	TEST_ASSERT(sync_regs->crs[8] == 1, "cr8 == 1 (sync_regs)");
TEST_ASSERT(sync_regs->crs[10] == 1, "cr10 == 1 (sync_regs)");
TEST_ASSERT(sync_regs->crs[11] == -1, "cr11 == -1 (sync_regs)");
}
-static void assert_normal(void)
+static void assert_normal(struct kvm_vcpu *vcpu)
{
- test_one_reg(KVM_REG_S390_PFTOKEN, KVM_S390_PFAULT_TOKEN_INVALID);
- TEST_ASSERT(sync_regs->pft == KVM_S390_PFAULT_TOKEN_INVALID,
+ test_one_reg(vcpu, KVM_REG_S390_PFTOKEN, KVM_S390_PFAULT_TOKEN_INVALID);
+ TEST_ASSERT(vcpu->run->s.regs.pft == KVM_S390_PFAULT_TOKEN_INVALID,
"pft == 0xff..... (sync_regs)");
- assert_noirq();
+ assert_noirq(vcpu);
}
-static void inject_irq(int cpu_id)
+static void inject_irq(struct kvm_vcpu *vcpu)
{
struct kvm_s390_irq_state irq_state;
struct kvm_s390_irq *irq = &buf[0];
@@ -195,85 +197,119 @@ static void inject_irq(int cpu_id)
irq_state.len = sizeof(struct kvm_s390_irq);
irq_state.buf = (unsigned long)buf;
irq->type = KVM_S390_INT_EMERGENCY;
- irq->u.emerg.code = cpu_id;
- irqs = _vcpu_ioctl(vm, cpu_id, KVM_S390_SET_IRQ_STATE, &irq_state);
+ irq->u.emerg.code = vcpu->id;
+ irqs = __vcpu_ioctl(vcpu, KVM_S390_SET_IRQ_STATE, &irq_state);
	TEST_ASSERT(irqs >= 0, "Error injecting EMERGENCY IRQ, errno %d\n", errno);
}
+static struct kvm_vm *create_vm(struct kvm_vcpu **vcpu)
+{
+ struct kvm_vm *vm;
+
+ vm = vm_create(1);
+
+ *vcpu = vm_vcpu_add(vm, ARBITRARY_NON_ZERO_VCPU_ID, guest_code_initial);
+
+ return vm;
+}
+
static void test_normal(void)
{
- pr_info("Testing normal reset\n");
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
- run = vcpu_state(vm, VCPU_ID);
- sync_regs = &run->s.regs;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ ksft_print_msg("Testing normal reset\n");
+ vm = create_vm(&vcpu);
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- inject_irq(VCPU_ID);
+ inject_irq(vcpu);
- vcpu_ioctl(vm, VCPU_ID, KVM_S390_NORMAL_RESET, 0);
+ vcpu_ioctl(vcpu, KVM_S390_NORMAL_RESET, NULL);
/* must clears */
- assert_normal();
+ assert_normal(vcpu);
/* must not clears */
- assert_normal_noclear();
- assert_initial_noclear();
+ assert_normal_noclear(vcpu);
+ assert_initial_noclear(vcpu);
kvm_vm_free(vm);
}
static void test_initial(void)
{
- pr_info("Testing initial reset\n");
- vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
- run = vcpu_state(vm, VCPU_ID);
- sync_regs = &run->s.regs;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
- vcpu_run(vm, VCPU_ID);
+ ksft_print_msg("Testing initial reset\n");
+ vm = create_vm(&vcpu);
- inject_irq(VCPU_ID);
+ vcpu_run(vcpu);
- vcpu_ioctl(vm, VCPU_ID, KVM_S390_INITIAL_RESET, 0);
+ inject_irq(vcpu);
+
+ vcpu_ioctl(vcpu, KVM_S390_INITIAL_RESET, NULL);
/* must clears */
- assert_normal();
- assert_initial();
+ assert_normal(vcpu);
+ assert_initial(vcpu);
/* must not clears */
- assert_initial_noclear();
+ assert_initial_noclear(vcpu);
kvm_vm_free(vm);
}
static void test_clear(void)
{
- pr_info("Testing clear reset\n");
- vm = vm_create_default(VCPU_ID, 0, guest_code_initial);
- run = vcpu_state(vm, VCPU_ID);
- sync_regs = &run->s.regs;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ ksft_print_msg("Testing clear reset\n");
+ vm = create_vm(&vcpu);
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- inject_irq(VCPU_ID);
+ inject_irq(vcpu);
- vcpu_ioctl(vm, VCPU_ID, KVM_S390_CLEAR_RESET, 0);
+ vcpu_ioctl(vcpu, KVM_S390_CLEAR_RESET, NULL);
/* must clears */
- assert_normal();
- assert_initial();
- assert_clear();
+ assert_normal(vcpu);
+ assert_initial(vcpu);
+ assert_clear(vcpu);
kvm_vm_free(vm);
}
+struct testdef {
+ const char *name;
+ void (*test)(void);
+ bool needs_cap;
+} testlist[] = {
+ { "initial", test_initial, false },
+ { "normal", test_normal, true },
+ { "clear", test_clear, true },
+};
+
int main(int argc, char *argv[])
{
+ bool has_s390_vcpu_resets = kvm_check_cap(KVM_CAP_S390_VCPU_RESETS);
+ int idx;
+
setbuf(stdout, NULL); /* Tell stdout not to buffer its content */
- test_initial();
- if (kvm_check_cap(KVM_CAP_S390_VCPU_RESETS)) {
- test_normal();
- test_clear();
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(testlist));
+
+ for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
+ if (!testlist[idx].needs_cap || has_s390_vcpu_resets) {
+ testlist[idx].test();
+ ksft_test_result_pass("%s\n", testlist[idx].name);
+ } else {
+ ksft_test_result_skip("%s - no VCPU_RESETS capability\n",
+ testlist[idx].name);
+ }
}
- return 0;
+
+ ksft_finished(); /* Print results and exit() accordingly */
}
diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
index caf7b8859a94..3fdb6e2598eb 100644
--- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c
@@ -21,8 +21,7 @@
#include "test_util.h"
#include "kvm_util.h"
#include "diag318_test_handler.h"
-
-#define VCPU_ID 5
+#include "kselftest.h"
static void guest_code(void)
{
@@ -74,61 +73,58 @@ static void compare_sregs(struct kvm_sregs *left, struct kvm_sync_regs *right)
#define TEST_SYNC_FIELDS (KVM_SYNC_GPRS|KVM_SYNC_ACRS|KVM_SYNC_CRS|KVM_SYNC_DIAG318)
#define INVALID_SYNC_FIELD 0x80000000
-int main(int argc, char *argv[])
+void test_read_invalid(struct kvm_vcpu *vcpu)
{
- struct kvm_vm *vm;
- struct kvm_run *run;
- struct kvm_regs regs;
- struct kvm_sregs sregs;
- int rv, cap;
-
- /* Tell stdout not to buffer its content */
- setbuf(stdout, NULL);
-
- cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
- if (!cap) {
- print_skip("CAP_SYNC_REGS not supported");
- exit(KSFT_SKIP);
- }
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
-
- run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
+ int rv;
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+ run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+ run->kvm_valid_regs = 0;
+}
+
+void test_set_invalid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ int rv;
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+ run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+ run->kvm_dirty_regs = 0;
+}
+
+void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ struct kvm_sregs sregs;
+ struct kvm_regs regs;
+ int rv;
/* Request and verify all valid register sets. */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
@@ -141,11 +137,19 @@ int main(int argc, char *argv[])
run->s390_sieic.icptcode, run->s390_sieic.ipa,
run->s390_sieic.ipb);
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
compare_regs(&regs, &run->s.regs);
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs);
+}
+
+void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ struct kvm_sregs sregs;
+ struct kvm_regs regs;
+ int rv;
/* Set and verify various register values */
run->s.regs.gprs[11] = 0xBAD1DEA;
@@ -159,7 +163,7 @@ int main(int argc, char *argv[])
run->kvm_dirty_regs |= KVM_SYNC_DIAG318;
}
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
@@ -175,11 +179,17 @@ int main(int argc, char *argv[])
"diag318 sync regs value incorrect 0x%llx.",
run->s.regs.diag318);
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
compare_regs(&regs, &run->s.regs);
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs);
+}
+
+void test_clear_kvm_dirty_regs_bits(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ int rv;
/* Clear kvm_dirty_regs bits, verify new s.regs values are
* overwritten with existing guest values.
@@ -188,7 +198,7 @@ int main(int argc, char *argv[])
run->kvm_dirty_regs = 0;
run->s.regs.gprs[11] = 0xDEADBEEF;
run->s.regs.diag318 = 0x4B1D;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv);
TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC,
"Unexpected exit reason: %u (%s)\n",
@@ -200,8 +210,43 @@ int main(int argc, char *argv[])
TEST_ASSERT(run->s.regs.diag318 != 0x4B1D,
"diag318 sync regs value incorrect 0x%llx.",
run->s.regs.diag318);
+}
+
+struct testdef {
+ const char *name;
+ void (*test)(struct kvm_vcpu *vcpu);
+} testlist[] = {
+ { "read invalid", test_read_invalid },
+ { "set invalid", test_set_invalid },
+ { "request+verify all valid regs", test_req_and_verify_all_valid_regs },
+ { "set+verify various regs", test_set_and_verify_various_reg_values },
+ { "clear kvm_dirty_regs bits", test_clear_kvm_dirty_regs_bits },
+};
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ int idx;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ ksft_print_header();
+
+ ksft_set_plan(ARRAY_SIZE(testlist));
+
+ /* Create VM */
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
+ testlist[idx].test(vcpu);
+ ksft_test_result_pass("%s\n", testlist[idx].name);
+ }
kvm_vm_free(vm);
- return 0;
+ ksft_finished(); /* Print results and exit() accordingly */
}
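
The hunk above replaces sync_regs_test's monolithic main() with a table of named subtests reported through the kselftest TAP helpers. A minimal sketch of that harness shape, assuming the vm_create_with_one_vcpu(), ksft_*() and ARRAY_SIZE() helpers visible in this hunk; the guest body and subtest are hypothetical placeholders:

/* Sketch only: table-driven TAP harness, per the refactor above. */
#include "kvm_util.h"
#include "kselftest.h"

static void guest_code(void)
{
	for (;;)
		;	/* hypothetical guest body; real tests GUEST_SYNC() here */
}

static void test_example(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT(vcpu->run != NULL, "each subtest drives the same vCPU");
}

static struct testdef {
	const char *name;
	void (*test)(struct kvm_vcpu *vcpu);
} testlist[] = {
	{ "example subtest", test_example },
};

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	int idx;

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(testlist));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		testlist[idx].test(vcpu);
		ksft_test_result_pass("%s\n", testlist[idx].name);
	}
	kvm_vm_free(vm);
	ksft_finished();	/* prints totals and exits with the right code */
}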
diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c
index c097b9db495e..a9a0b76e5fa4 100644
--- a/tools/testing/selftests/kvm/s390x/tprot.c
+++ b/tools/testing/selftests/kvm/s390x/tprot.c
@@ -8,14 +8,13 @@
#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"
+#include "kselftest.h"
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
-#define VCPU_ID 1
-
static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];
@@ -63,12 +62,12 @@ static enum permission test_protection(void *addr, uint8_t key)
}
enum stage {
- STAGE_END,
STAGE_INIT_SIMPLE,
TEST_SIMPLE,
STAGE_INIT_FETCH_PROT_OVERRIDE,
TEST_FETCH_PROT_OVERRIDE,
TEST_STORAGE_PROT_OVERRIDE,
+ STAGE_END /* must be the last entry (it's the number of tests) */
};
struct test {
@@ -182,46 +181,63 @@ static void guest_code(void)
GUEST_SYNC(perform_next_stage(&i, mapped_0));
}
-#define HOST_SYNC(vmp, stage) \
-({ \
- struct kvm_vm *__vm = (vmp); \
- struct ucall uc; \
- int __stage = (stage); \
- \
- vcpu_run(__vm, VCPU_ID); \
- get_ucall(__vm, VCPU_ID, &uc); \
- if (uc.cmd == UCALL_ABORT) { \
- TEST_FAIL("line %lu: %s, hints: %lu, %lu", uc.args[1], \
- (const char *)uc.args[0], uc.args[2], uc.args[3]); \
- } \
- ASSERT_EQ(uc.cmd, UCALL_SYNC); \
- ASSERT_EQ(uc.args[1], __stage); \
+#define HOST_SYNC_NO_TAP(vcpup, stage) \
+({ \
+ struct kvm_vcpu *__vcpu = (vcpup); \
+ struct ucall uc; \
+ int __stage = (stage); \
+ \
+ vcpu_run(__vcpu); \
+ get_ucall(__vcpu, &uc); \
+ if (uc.cmd == UCALL_ABORT) \
+ REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \
+ ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+ ASSERT_EQ(uc.args[1], __stage); \
+})
+
+#define HOST_SYNC(vcpu, stage) \
+({ \
+ HOST_SYNC_NO_TAP(vcpu, stage); \
+ ksft_test_result_pass("" #stage "\n"); \
})
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
vm_vaddr_t guest_0_page;
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ ksft_print_header();
+ ksft_set_plan(STAGE_END);
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
- HOST_SYNC(vm, STAGE_INIT_SIMPLE);
+ HOST_SYNC(vcpu, STAGE_INIT_SIMPLE);
mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
- HOST_SYNC(vm, TEST_SIMPLE);
+ HOST_SYNC(vcpu, TEST_SIMPLE);
guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
- if (guest_0_page != 0)
- print_skip("Did not allocate page at 0 for fetch protection override tests");
- HOST_SYNC(vm, STAGE_INIT_FETCH_PROT_OVERRIDE);
+ if (guest_0_page != 0) {
+ /* Use NO_TAP so we don't get a PASS print */
+ HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
+ ksft_test_result_skip("STAGE_INIT_FETCH_PROT_OVERRIDE - "
+ "Did not allocate page at 0\n");
+ } else {
+ HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
+ }
if (guest_0_page == 0)
mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
run->kvm_dirty_regs = KVM_SYNC_CRS;
- HOST_SYNC(vm, TEST_FETCH_PROT_OVERRIDE);
+ HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);
run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
run->kvm_dirty_regs = KVM_SYNC_CRS;
- HOST_SYNC(vm, TEST_STORAGE_PROT_OVERRIDE);
+ HOST_SYNC(vcpu, TEST_STORAGE_PROT_OVERRIDE);
+
+ kvm_vm_free(vm);
+
+ ksft_finished(); /* Print results and exit() accordingly */
}
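
HOST_SYNC_NO_TAP exists so a guest stage can still be driven when the host wants to report skip rather than pass: the macro advances the guest one stage but prints no TAP line, leaving the report to the caller. A sketch of that choice, assuming the macros defined in the hunk above; the stage constant and availability flag are hypothetical placeholders:

/* Sketch only: skip-vs-pass reporting around a guest stage. */
enum { STAGE_X };	/* hypothetical stage */

static void run_stage_x(struct kvm_vcpu *vcpu, bool available)
{
	if (!available) {
		/* Keep guest and host stages in lockstep, but report skip. */
		HOST_SYNC_NO_TAP(vcpu, STAGE_X);
		ksft_test_result_skip("STAGE_X - prerequisite unavailable\n");
	} else {
		HOST_SYNC(vcpu, STAGE_X);	/* advances and prints "STAGE_X" as PASS */
	}
}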
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index 73bc297dabe6..0d55f508d595 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -17,8 +17,6 @@
#include <kvm_util.h>
#include <processor.h>
-#define VCPU_ID 0
-
/*
* s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
* 2MB sized and aligned region so that the initial region corresponds to
@@ -54,8 +52,8 @@ static inline uint64_t guest_spin_on_val(uint64_t spin_val)
static void *vcpu_worker(void *data)
{
- struct kvm_vm *vm = data;
- struct kvm_run *run;
+ struct kvm_vcpu *vcpu = data;
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
uint64_t cmd;
@@ -64,13 +62,11 @@ static void *vcpu_worker(void *data)
* which will occur if the guest attempts to access a memslot after it
* has been deleted or while it is being moved.
*/
- run = vcpu_state(vm, VCPU_ID);
-
while (1) {
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_IO) {
- cmd = get_ucall(vm, VCPU_ID, &uc);
+ cmd = get_ucall(vcpu, &uc);
if (cmd != UCALL_SYNC)
break;
@@ -92,8 +88,7 @@ static void *vcpu_worker(void *data)
}
if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
- TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
- __FILE__, uc.args[1], uc.args[2]);
+ REPORT_GUEST_ASSERT_1(uc, "val = %lu");
return NULL;
}
@@ -113,13 +108,14 @@ static void wait_for_vcpu(void)
usleep(100000);
}
-static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
+static struct kvm_vm *spawn_vm(struct kvm_vcpu **vcpu, pthread_t *vcpu_thread,
+ void *guest_code)
{
struct kvm_vm *vm;
uint64_t *hva;
uint64_t gpa;
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(vcpu, guest_code);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
MEM_REGION_GPA, MEM_REGION_SLOT,
@@ -138,7 +134,7 @@ static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, 2 * 4096);
- pthread_create(vcpu_thread, NULL, vcpu_worker, vm);
+ pthread_create(vcpu_thread, NULL, vcpu_worker, *vcpu);
/* Ensure the guest thread is spun up. */
wait_for_vcpu();
@@ -180,10 +176,11 @@ static void guest_code_move_memory_region(void)
static void test_move_memory_region(void)
{
pthread_t vcpu_thread;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t *hva;
- vm = spawn_vm(&vcpu_thread, guest_code_move_memory_region);
+ vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_move_memory_region);
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
@@ -258,11 +255,12 @@ static void guest_code_delete_memory_region(void)
static void test_delete_memory_region(void)
{
pthread_t vcpu_thread;
+ struct kvm_vcpu *vcpu;
struct kvm_regs regs;
struct kvm_run *run;
struct kvm_vm *vm;
- vm = spawn_vm(&vcpu_thread, guest_code_delete_memory_region);
+ vm = spawn_vm(&vcpu, &vcpu_thread, guest_code_delete_memory_region);
/* Delete the memory region, the guest should not die. */
vm_mem_region_delete(vm, MEM_REGION_SLOT);
@@ -286,13 +284,13 @@ static void test_delete_memory_region(void)
pthread_join(vcpu_thread, NULL);
- run = vcpu_state(vm, VCPU_ID);
+ run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Unexpected exit reason = %d", run->exit_reason);
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
/*
* On AMD, after KVM_EXIT_SHUTDOWN the VMCB has been reinitialized already,
@@ -309,19 +307,19 @@ static void test_delete_memory_region(void)
static void test_zero_memory_regions(void)
{
+ struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
pr_info("Testing KVM_RUN with zero added memory regions\n");
- vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
+ vm = vm_create_barebones();
+ vcpu = __vm_vcpu_add(vm, 0);
- TEST_ASSERT(!ioctl(vm_get_fd(vm), KVM_SET_NR_MMU_PAGES, 64),
- "KVM_SET_NR_MMU_PAGES failed, errno = %d\n", errno);
- vcpu_run(vm, VCPU_ID);
+ vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
+ vcpu_run(vcpu);
- run = vcpu_state(vm, VCPU_ID);
+ run = vcpu->run;
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Unexpected exit_reason = %u\n", run->exit_reason);
@@ -354,7 +352,7 @@ static void test_add_max_memory_regions(void)
"KVM_CAP_NR_MEMSLOTS should be greater than 0");
pr_info("Allowed number of memory slots: %i\n", max_mem_slots);
- vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ vm = vm_create_barebones();
/* Check that memory slots can be added up to the maximum allowed */
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index 62f2eb9ee3d5..db8967f1a17b 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -58,36 +58,32 @@ static void guest_code(int cpu)
GUEST_DONE();
}
-static void steal_time_init(struct kvm_vm *vm)
+static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
- int i;
-
- if (!(kvm_get_supported_cpuid_entry(KVM_CPUID_FEATURES)->eax &
- KVM_FEATURE_STEAL_TIME)) {
- print_skip("steal-time not supported");
- exit(KSFT_SKIP);
- }
+ return kvm_cpu_has(X86_FEATURE_KVM_STEAL_TIME);
+}
- for (i = 0; i < NR_VCPUS; ++i) {
- int ret;
+static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+{
+ int ret;
- /* ST_GPA_BASE is identity mapped */
- st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
- sync_global_to_guest(vm, st_gva[i]);
+ /* ST_GPA_BASE is identity mapped */
+ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+ sync_global_to_guest(vcpu->vm, st_gva[i]);
- ret = _vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
- TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
+ ret = _vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME,
+ (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
+ TEST_ASSERT(ret == 0, "Bad GPA didn't fail");
- vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
- }
+ vcpu_set_msr(vcpu, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
}
-static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
- struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
+ struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
int i;
- pr_info("VCPU%d:\n", vcpuid);
+ pr_info("VCPU%d:\n", vcpu_idx);
pr_info(" steal: %lld\n", st->steal);
pr_info(" version: %d\n", st->version);
pr_info(" flags: %d\n", st->flags);
@@ -118,17 +114,10 @@ struct st_time {
static int64_t smccc(uint32_t func, uint64_t arg)
{
- unsigned long ret;
-
- asm volatile(
- "mov w0, %w1\n"
- "mov x1, %2\n"
- "hvc #0\n"
- "mov %0, x0\n"
- : "=r" (ret) : "r" (func), "r" (arg) :
- "x0", "x1", "x2", "x3");
+ struct arm_smccc_res res;
- return ret;
+ smccc_hvc(func, arg, 0, 0, 0, 0, 0, 0, &res);
+ return res.a0;
}
static void check_status(struct st_time *st)
@@ -165,49 +154,50 @@ static void guest_code(int cpu)
GUEST_DONE();
}
-static void steal_time_init(struct kvm_vm *vm)
+static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
struct kvm_device_attr dev = {
.group = KVM_ARM_VCPU_PVTIME_CTRL,
.attr = KVM_ARM_VCPU_PVTIME_IPA,
};
- int i, ret;
- ret = _vcpu_ioctl(vm, 0, KVM_HAS_DEVICE_ATTR, &dev);
- if (ret != 0 && errno == ENXIO) {
- print_skip("steal-time not supported");
- exit(KSFT_SKIP);
- }
-
- for (i = 0; i < NR_VCPUS; ++i) {
- uint64_t st_ipa;
+ return !__vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
+}
- vcpu_ioctl(vm, i, KVM_HAS_DEVICE_ATTR, &dev);
+static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i)
+{
+ struct kvm_vm *vm = vcpu->vm;
+ uint64_t st_ipa;
+ int ret;
- dev.addr = (uint64_t)&st_ipa;
+ struct kvm_device_attr dev = {
+ .group = KVM_ARM_VCPU_PVTIME_CTRL,
+ .attr = KVM_ARM_VCPU_PVTIME_IPA,
+ .addr = (uint64_t)&st_ipa,
+ };
- /* ST_GPA_BASE is identity mapped */
- st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
- sync_global_to_guest(vm, st_gva[i]);
+ vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &dev);
- st_ipa = (ulong)st_gva[i] | 1;
- ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
- TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
+ /* ST_GPA_BASE is identity mapped */
+ st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
+ sync_global_to_guest(vm, st_gva[i]);
- st_ipa = (ulong)st_gva[i];
- vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
+ st_ipa = (ulong)st_gva[i] | 1;
+ ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
+ TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");
- ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
- TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
+ st_ipa = (ulong)st_gva[i];
+ vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
- }
+ ret = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &dev);
+ TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
}
-static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
+static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx)
{
- struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
+ struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]);
- pr_info("VCPU%d:\n", vcpuid);
+ pr_info("VCPU%d:\n", vcpu_idx);
pr_info(" rev: %d\n", st->rev);
pr_info(" attr: %d\n", st->attr);
pr_info(" st_time: %ld\n", st->st_time);
@@ -231,29 +221,27 @@ static void *do_steal_time(void *arg)
return NULL;
}
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+static void run_vcpu(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- vcpu_args_set(vm, vcpuid, 1, vcpuid);
-
- vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpuid, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
case UCALL_DONE:
break;
case UCALL_ABORT:
- TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
- exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+ exit_reason_str(vcpu->run->exit_reason));
}
}
int main(int ac, char **av)
{
+ struct kvm_vcpu *vcpus[NR_VCPUS];
struct kvm_vm *vm;
pthread_attr_t attr;
pthread_t thread;
@@ -273,26 +261,26 @@ int main(int ac, char **av)
pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
- /* Create a one VCPU guest and an identity mapped memslot for the steal time structure */
- vm = vm_create_default(0, 0, guest_code);
+ /* Create a VM and an identity mapped memslot for the steal time structure */
+ vm = vm_create_with_vcpus(NR_VCPUS, guest_code, vcpus);
gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
ucall_init(vm, NULL);
- /* Add the rest of the VCPUs */
- for (i = 1; i < NR_VCPUS; ++i)
- vm_vcpu_add_default(vm, i, guest_code);
-
- steal_time_init(vm);
+ TEST_REQUIRE(is_steal_time_supported(vcpus[0]));
/* Run test on each VCPU */
for (i = 0; i < NR_VCPUS; ++i) {
+ steal_time_init(vcpus[i], i);
+
+ vcpu_args_set(vcpus[i], 1, i);
+
/* First VCPU run initializes steal-time */
- run_vcpu(vm, i);
+ run_vcpu(vcpus[i]);
/* Second VCPU run, expect guest stolen time to be <= run_delay */
- run_vcpu(vm, i);
+ run_vcpu(vcpus[i]);
sync_global_from_guest(vm, guest_stolen_time[i]);
stolen_time = guest_stolen_time[i];
run_delay = get_run_delay();
@@ -313,7 +301,7 @@ int main(int ac, char **av)
MIN_RUN_DELAY_NS, run_delay);
/* Run VCPU again to confirm stolen time is consistent with run_delay */
- run_vcpu(vm, i);
+ run_vcpu(vcpus[i]);
sync_global_from_guest(vm, guest_stolen_time[i]);
stolen_time = guest_stolen_time[i] - stolen_time;
TEST_ASSERT(stolen_time >= run_delay,
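
The aarch64 smccc() helper above now delegates to smccc_hvc() instead of hand-rolled inline assembly, so the register assignments and clobber list live in one shared place. A hedged sketch of guest-side use, with the PV time function IDs coming from linux/arm-smccc.h (also imported into tools/ in this series):

/* Sketch only: querying the PV stolen-time service via the smccc() wrapper. */
#include <linux/arm-smccc.h>

static void guest_check_pv_time(void)
{
	int64_t ret;

	/* Is the paravirtual stolen-time service offered by the hypervisor? */
	ret = smccc(ARM_SMCC_HV_PV_TIME_FEATURES, ARM_SMCCC_HV_PV_TIME_ST);
	GUEST_ASSERT(ret == 0);
}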
diff --git a/tools/testing/selftests/kvm/system_counter_offset_test.c b/tools/testing/selftests/kvm/system_counter_offset_test.c
index b337bbbfa41f..1c274933912b 100644
--- a/tools/testing/selftests/kvm/system_counter_offset_test.c
+++ b/tools/testing/selftests/kvm/system_counter_offset_test.c
@@ -14,8 +14,6 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 0
-
#ifdef __x86_64__
struct test_case {
@@ -28,19 +26,17 @@ static struct test_case test_cases[] = {
{ -180 * NSEC_PER_SEC },
};
-static void check_preconditions(struct kvm_vm *vm)
+static void check_preconditions(struct kvm_vcpu *vcpu)
{
- if (!_vcpu_has_device_attr(vm, VCPU_ID, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET))
- return;
-
- print_skip("KVM_VCPU_TSC_OFFSET not supported; skipping test");
- exit(KSFT_SKIP);
+ __TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL,
+ KVM_VCPU_TSC_OFFSET),
+ "KVM_VCPU_TSC_OFFSET not supported; skipping test");
}
-static void setup_system_counter(struct kvm_vm *vm, struct test_case *test)
+static void setup_system_counter(struct kvm_vcpu *vcpu, struct test_case *test)
{
- vcpu_access_device_attr(vm, VCPU_ID, KVM_VCPU_TSC_CTRL,
- KVM_VCPU_TSC_OFFSET, &test->tsc_offset, true);
+ vcpu_device_attr_set(vcpu, KVM_VCPU_TSC_CTRL, KVM_VCPU_TSC_OFFSET,
+ &test->tsc_offset);
}
static uint64_t guest_read_system_counter(struct test_case *test)
@@ -87,11 +83,10 @@ static void handle_sync(struct ucall *uc, uint64_t start, uint64_t end)
static void handle_abort(struct ucall *uc)
{
- TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
- __FILE__, uc->args[1]);
+ REPORT_GUEST_ASSERT(*uc);
}
-static void enter_guest(struct kvm_vm *vm)
+static void enter_guest(struct kvm_vcpu *vcpu)
{
uint64_t start, end;
struct ucall uc;
@@ -100,12 +95,12 @@ static void enter_guest(struct kvm_vm *vm)
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
struct test_case *test = &test_cases[i];
- setup_system_counter(vm, test);
+ setup_system_counter(vcpu, test);
start = host_read_guest_system_counter(test);
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
end = host_read_guest_system_counter(test);
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
handle_sync(&uc, start, end);
break;
@@ -114,19 +109,20 @@ static void enter_guest(struct kvm_vm *vm)
return;
default:
TEST_ASSERT(0, "unhandled ucall %ld\n",
- get_ucall(vm, VCPU_ID, &uc));
+ get_ucall(vcpu, &uc));
}
}
}
int main(void)
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- vm = vm_create_default(VCPU_ID, 0, guest_main);
- check_preconditions(vm);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
+ check_preconditions(vcpu);
ucall_init(vm, NULL);
- enter_guest(vm);
+ enter_guest(vcpu);
kvm_vm_free(vm);
}
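
check_preconditions() above collapses into __TEST_REQUIRE(), the skip helper this series uses throughout. A small sketch of the two variants, assuming the test_util.h macros shown in these hunks; both exit with KSFT_SKIP when the condition is false, and the guest body here is a trivial placeholder:

/* Sketch only: requirement/skip helpers. */
#include "test_util.h"
#include "kvm_util.h"

static void guest_main(void)
{
	GUEST_DONE();	/* trivial guest, just for the sketch */
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Skip message is generated from the condition text itself. */
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VCPU_ATTRIBUTES));

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	/* Explicit message when the raw condition wouldn't read well. */
	__TEST_REQUIRE(!__vcpu_has_device_attr(vcpu, KVM_VCPU_TSC_CTRL,
					       KVM_VCPU_TSC_OFFSET),
		       "KVM_VCPU_TSC_OFFSET not supported; skipping test");

	kvm_vm_free(vm);
	return 0;
}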
diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c
index 76f65c22796f..dadcbad10a1d 100644
--- a/tools/testing/selftests/kvm/x86_64/amx_test.c
+++ b/tools/testing/selftests/kvm/x86_64/amx_test.c
@@ -25,10 +25,6 @@
# error This test is 64-bit only
#endif
-#define VCPU_ID 0
-#define X86_FEATURE_XSAVE (1 << 26)
-#define X86_FEATURE_OSXSAVE (1 << 27)
-
#define NUM_TILES 8
#define TILE_SIZE 1024
#define XSAVE_SIZE ((NUM_TILES * TILE_SIZE) + PAGE_SIZE)
@@ -124,15 +120,8 @@ static inline void __xsavec(struct xsave_data *data, uint64_t rfbm)
static inline void check_cpuid_xsave(void)
{
- uint32_t eax, ebx, ecx, edx;
-
- eax = 1;
- ecx = 0;
- cpuid(&eax, &ebx, &ecx, &edx);
- if (!(ecx & X86_FEATURE_XSAVE))
- GUEST_ASSERT(!"cpuid: no CPU xsave support!");
- if (!(ecx & X86_FEATURE_OSXSAVE))
- GUEST_ASSERT(!"cpuid: no OS xsave support!");
+ GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));
+ GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
}
static bool check_xsave_supports_xtile(void)
@@ -144,10 +133,7 @@ static bool enum_xtile_config(void)
{
u32 eax, ebx, ecx, edx;
- eax = TILE_CPUID;
- ecx = TILE_PALETTE_CPUID_SUBLEAVE;
-
- cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid(TILE_CPUID, TILE_PALETTE_CPUID_SUBLEAVE, &eax, &ebx, &ecx, &edx);
if (!eax || !ebx || !ecx)
return false;
@@ -169,10 +155,7 @@ static bool enum_xsave_tile(void)
{
u32 eax, ebx, ecx, edx;
- eax = XSTATE_CPUID;
- ecx = XFEATURE_XTILEDATA;
-
- cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid(XSTATE_CPUID, XFEATURE_XTILEDATA, &eax, &ebx, &ecx, &edx);
if (!eax || !ebx)
return false;
@@ -187,10 +170,7 @@ static bool check_xsave_size(void)
u32 eax, ebx, ecx, edx;
bool valid = false;
- eax = XSTATE_CPUID;
- ecx = XSTATE_USER_STATE_SUBLEAVE;
-
- cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid(XSTATE_CPUID, XSTATE_USER_STATE_SUBLEAVE, &eax, &ebx, &ecx, &edx);
if (ebx && ebx <= XSAVE_SIZE)
valid = true;
@@ -316,46 +296,36 @@ void guest_nm_handler(struct ex_regs *regs)
int main(int argc, char *argv[])
{
- struct kvm_cpuid_entry2 *entry;
struct kvm_regs regs1, regs2;
- bool amx_supported = false;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_x86_state *state;
- int xsave_restore_size = 0;
+ int xsave_restore_size;
vm_vaddr_t amx_cfg, tiledata, xsavedata;
struct ucall uc;
u32 amx_offset;
int stage, ret;
- vm_xsave_req_perm(XSTATE_XTILE_DATA_BIT);
+ vm_xsave_require_permission(XSTATE_XTILE_DATA_BIT);
/* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & X86_FEATURE_XSAVE)) {
- print_skip("XSAVE feature not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_AMX_TILE));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILECFG));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XTILEDATA));
- if (kvm_get_cpuid_max_basic() >= 0xd) {
- entry = kvm_get_supported_cpuid_index(0xd, 0);
- amx_supported = entry && !!(entry->eax & XFEATURE_MASK_XTILE);
- if (!amx_supported) {
- print_skip("AMX is not supported by the vCPU (eax=0x%x)", entry->eax);
- exit(KSFT_SKIP);
- }
- /* Get xsave/restore max size */
- xsave_restore_size = entry->ecx;
- }
+ /* Get xsave/restore max size */
+ xsave_restore_size = kvm_get_supported_cpuid_entry(0xd)->ecx;
- run = vcpu_state(vm, VCPU_ID);
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ run = vcpu->run;
+ vcpu_regs_get(vcpu, &regs1);
/* Register #NM handler */
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, NM_VECTOR, guest_nm_handler);
/* amx cfg for guest_code */
@@ -369,19 +339,18 @@ int main(int argc, char *argv[])
/* xsave data for guest_code */
xsavedata = vm_vaddr_alloc_pages(vm, 3);
memset(addr_gva2hva(vm, xsavedata), 0, 3 * getpagesize());
- vcpu_args_set(vm, VCPU_ID, 3, amx_cfg, tiledata, xsavedata);
+ vcpu_args_set(vcpu, 3, amx_cfg, tiledata, xsavedata);
for (stage = 1; ; stage++) {
- _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
switch (uc.args[1]) {
@@ -403,7 +372,7 @@ int main(int argc, char *argv[])
* size minus the 8K AMX size.
*/
amx_offset = xsave_restore_size - NUM_TILES*TILE_SIZE;
- state = vcpu_save_state(vm, VCPU_ID);
+ state = vcpu_save_state(vcpu);
void *amx_start = (void *)state->xsave + amx_offset;
void *tiles_data = (void *)addr_gva2hva(vm, tiledata);
/* Only check TMM0 register, 1 tile */
@@ -424,22 +393,20 @@ int main(int argc, char *argv[])
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
- state = vcpu_save_state(vm, VCPU_ID);
+ state = vcpu_save_state(vcpu);
memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ vcpu_regs_get(vcpu, &regs1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_load_state(vcpu, state);
+ run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
+ vcpu_regs_get(vcpu, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
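
The amx_test tail above saves vCPU state, releases the VM's vCPU file descriptors, and restores into a recreated vCPU. A sketch of that flow as one helper, assuming vcpu_save_state(), vm_recreate_with_one_vcpu(), vcpu_load_state() and kvm_x86_state_cleanup() from the selftest library as used in this hunk:

/* Sketch only: save/restore a vCPU into a recreated VM. */
static struct kvm_vcpu *migrate_vcpu(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct kvm_x86_state *state = vcpu_save_state(vcpu);

	kvm_vm_release(vm);			/* drop the old vCPU file descriptors */
	vcpu = vm_recreate_with_one_vcpu(vm);	/* same guest memory, fresh vCPU */
	vcpu_load_state(vcpu, state);
	kvm_x86_state_cleanup(state);
	return vcpu;
}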
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index 16d2465c5634..a6aeee2e62e4 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
@@ -12,8 +12,6 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 0
-
/* CPUIDs known to differ */
struct {
u32 function;
@@ -33,10 +31,9 @@ static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
u32 eax, ebx, ecx, edx;
for (i = 0; i < guest_cpuid->nent; i++) {
- eax = guest_cpuid->entries[i].function;
- ecx = guest_cpuid->entries[i].index;
-
- cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid(guest_cpuid->entries[i].function,
+ guest_cpuid->entries[i].index,
+ &eax, &ebx, &ecx, &edx);
GUEST_ASSERT(eax == guest_cpuid->entries[i].eax &&
ebx == guest_cpuid->entries[i].ebx &&
@@ -48,9 +45,9 @@ static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid)
{
- u32 eax = 0x40000000, ebx, ecx = 0, edx;
+ u32 eax, ebx, ecx, edx;
- cpuid(&eax, &ebx, &ecx, &edx);
+ cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
GUEST_ASSERT(eax == 0x40000001);
}
@@ -68,7 +65,7 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid)
GUEST_DONE();
}
-static bool is_cpuid_mangled(struct kvm_cpuid_entry2 *entrie)
+static bool is_cpuid_mangled(const struct kvm_cpuid_entry2 *entrie)
{
int i;
@@ -81,50 +78,44 @@ static bool is_cpuid_mangled(struct kvm_cpuid_entry2 *entrie)
return false;
}
-static void check_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 *entrie)
+static void compare_cpuids(const struct kvm_cpuid2 *cpuid1,
+ const struct kvm_cpuid2 *cpuid2)
{
+ const struct kvm_cpuid_entry2 *e1, *e2;
int i;
- for (i = 0; i < cpuid->nent; i++) {
- if (cpuid->entries[i].function == entrie->function &&
- cpuid->entries[i].index == entrie->index) {
- if (is_cpuid_mangled(entrie))
- return;
-
- TEST_ASSERT(cpuid->entries[i].eax == entrie->eax &&
- cpuid->entries[i].ebx == entrie->ebx &&
- cpuid->entries[i].ecx == entrie->ecx &&
- cpuid->entries[i].edx == entrie->edx,
- "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
- entrie->function, entrie->index,
- cpuid->entries[i].eax, cpuid->entries[i].ebx,
- cpuid->entries[i].ecx, cpuid->entries[i].edx,
- entrie->eax, entrie->ebx, entrie->ecx, entrie->edx);
- return;
- }
- }
+ TEST_ASSERT(cpuid1->nent == cpuid2->nent,
+ "CPUID nent mismatch: %d vs. %d", cpuid1->nent, cpuid2->nent);
- TEST_ASSERT(false, "CPUID 0x%x.%x not found", entrie->function, entrie->index);
-}
+ for (i = 0; i < cpuid1->nent; i++) {
+ e1 = &cpuid1->entries[i];
+ e2 = &cpuid2->entries[i];
-static void compare_cpuids(struct kvm_cpuid2 *cpuid1, struct kvm_cpuid2 *cpuid2)
-{
- int i;
+ TEST_ASSERT(e1->function == e2->function &&
+ e1->index == e2->index && e1->flags == e2->flags,
+ "CPUID entries[%d] mismtach: 0x%x.%d.%x vs. 0x%x.%d.%x\n",
+ i, e1->function, e1->index, e1->flags,
+ e2->function, e2->index, e2->flags);
- for (i = 0; i < cpuid1->nent; i++)
- check_cpuid(cpuid2, &cpuid1->entries[i]);
+ if (is_cpuid_mangled(e1))
+ continue;
- for (i = 0; i < cpuid2->nent; i++)
- check_cpuid(cpuid1, &cpuid2->entries[i]);
+ TEST_ASSERT(e1->eax == e2->eax && e1->ebx == e2->ebx &&
+ e1->ecx == e2->ecx && e1->edx == e2->edx,
+ "CPUID 0x%x.%x differ: 0x%x:0x%x:0x%x:0x%x vs 0x%x:0x%x:0x%x:0x%x",
+ e1->function, e1->index,
+ e1->eax, e1->ebx, e1->ecx, e1->edx,
+ e2->eax, e2->ebx, e2->ecx, e2->edx);
+ }
}
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
+static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
struct ucall uc;
- _vcpu_run(vm, vcpuid);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpuid, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1,
@@ -134,11 +125,10 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
case UCALL_DONE:
return;
case UCALL_ABORT:
- TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx", (const char *)uc.args[0],
- __FILE__, uc.args[1], uc.args[2], uc.args[3]);
+ REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
default:
TEST_ASSERT(false, "Unexpected exit: %s",
- exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+ exit_reason_str(vcpu->run->exit_reason));
}
}
@@ -154,56 +144,53 @@ struct kvm_cpuid2 *vcpu_alloc_cpuid(struct kvm_vm *vm, vm_vaddr_t *p_gva, struct
return guest_cpuids;
}
-static void set_cpuid_after_run(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid)
+static void set_cpuid_after_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *ent;
int rc;
u32 eax, ebx, x;
/* Setting unmodified CPUID is allowed */
- rc = __vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(!rc, "Setting unmodified CPUID after KVM_RUN failed: %d", rc);
/* Changing CPU features is forbidden */
- ent = get_cpuid(cpuid, 0x7, 0);
+ ent = vcpu_get_cpuid_entry(vcpu, 0x7);
ebx = ent->ebx;
ent->ebx--;
- rc = __vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(rc, "Changing CPU features should fail");
ent->ebx = ebx;
/* Changing MAXPHYADDR is forbidden */
- ent = get_cpuid(cpuid, 0x80000008, 0);
+ ent = vcpu_get_cpuid_entry(vcpu, 0x80000008);
eax = ent->eax;
x = eax & 0xff;
ent->eax = (eax & ~0xffu) | (x - 1);
- rc = __vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ rc = __vcpu_set_cpuid(vcpu);
TEST_ASSERT(rc, "Changing MAXPHYADDR should fail");
ent->eax = eax;
}
int main(void)
{
- struct kvm_cpuid2 *supp_cpuid, *cpuid2;
+ struct kvm_vcpu *vcpu;
vm_vaddr_t cpuid_gva;
struct kvm_vm *vm;
int stage;
- vm = vm_create_default(VCPU_ID, 0, guest_main);
-
- supp_cpuid = kvm_get_supported_cpuid();
- cpuid2 = vcpu_get_cpuid(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
- compare_cpuids(supp_cpuid, cpuid2);
+ compare_cpuids(kvm_get_supported_cpuid(), vcpu->cpuid);
- vcpu_alloc_cpuid(vm, &cpuid_gva, cpuid2);
+ vcpu_alloc_cpuid(vm, &cpuid_gva, vcpu->cpuid);
- vcpu_args_set(vm, VCPU_ID, 1, cpuid_gva);
+ vcpu_args_set(vcpu, 1, cpuid_gva);
for (stage = 0; stage < 3; stage++)
- run_vcpu(vm, VCPU_ID, stage);
+ run_vcpu(vcpu, stage);
- set_cpuid_after_run(vm, cpuid2);
+ set_cpuid_after_run(vcpu);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 6f6fd189dda3..4208487652f8 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -19,25 +19,11 @@
#include "kvm_util.h"
#include "processor.h"
-#define X86_FEATURE_XSAVE (1<<26)
-#define X86_FEATURE_OSXSAVE (1<<27)
-#define VCPU_ID 1
-
static inline bool cr4_cpuid_is_sync(void)
{
- int func, subfunc;
- uint32_t eax, ebx, ecx, edx;
- uint64_t cr4;
-
- func = 0x1;
- subfunc = 0x0;
- __asm__ __volatile__("cpuid"
- : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
- : "a"(func), "c"(subfunc));
+ uint64_t cr4 = get_cr4();
- cr4 = get_cr4();
-
- return (!!(ecx & X86_FEATURE_OSXSAVE)) == (!!(cr4 & X86_CR4_OSXSAVE));
+ return (this_cpu_has(X86_FEATURE_OSXSAVE) == !!(cr4 & X86_CR4_OSXSAVE));
}
static void guest_code(void)
@@ -63,44 +49,37 @@ static void guest_code(void)
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct kvm_sregs sregs;
- struct kvm_cpuid_entry2 *entry;
struct ucall uc;
- int rc;
- entry = kvm_get_supported_cpuid_entry(1);
- if (!(entry->ecx & X86_FEATURE_XSAVE)) {
- print_skip("XSAVE feature not supported");
- return 0;
- }
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
while (1) {
- rc = _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
/* emulate hypervisor clearing CR4.OSXSAVE */
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.cr4 &= ~X86_CR4_OSXSAVE;
- vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
break;
case UCALL_ABORT:
- TEST_FAIL("Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
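
cr4_cpuid_sync now leans on two distinct feature probes: kvm_cpu_has() on the host to decide whether to run at all, and this_cpu_has() in the guest to read the live CPUID state. A sketch of that split, assuming the X86_FEATURE_* definitions from processor.h as used in these hunks:

/* Sketch only: host-side vs. guest-side feature checks. */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

static void guest_code(void)
{
	/* Guest view: CPUID as the guest sees it right now. */
	GUEST_ASSERT(this_cpu_has(X86_FEATURE_XSAVE));
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Host view: skip if KVM cannot expose XSAVE at all. */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_run(vcpu);
	kvm_vm_free(vm);
	return 0;
}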
diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
index 5f078db1bcba..7ef99c3359a0 100644
--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
+++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
@@ -10,8 +10,6 @@
#include "processor.h"
#include "apic.h"
-#define VCPU_ID 0
-
#define DR6_BD (1 << 13)
#define DR7_GD (1 << 13)
@@ -66,21 +64,22 @@ static void guest_code(void)
GUEST_DONE();
}
-#define CLEAR_DEBUG() memset(&debug, 0, sizeof(debug))
-#define APPLY_DEBUG() vcpu_set_guest_debug(vm, VCPU_ID, &debug)
#define CAST_TO_RIP(v) ((unsigned long long)&(v))
-#define SET_RIP(v) do { \
- vcpu_regs_get(vm, VCPU_ID, &regs); \
- regs.rip = (v); \
- vcpu_regs_set(vm, VCPU_ID, &regs); \
- } while (0)
-#define MOVE_RIP(v) SET_RIP(regs.rip + (v));
+
+static void vcpu_skip_insn(struct kvm_vcpu *vcpu, int insn_len)
+{
+ struct kvm_regs regs;
+
+ vcpu_regs_get(vcpu, &regs);
+ regs.rip += insn_len;
+ vcpu_regs_set(vcpu, &regs);
+}
int main(void)
{
struct kvm_guest_debug debug;
unsigned long long target_dr6, target_rip;
- struct kvm_regs regs;
+ struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
@@ -96,35 +95,32 @@ int main(void)
1, /* cli */
};
- if (!kvm_check_cap(KVM_CAP_SET_GUEST_DEBUG)) {
- print_skip("KVM_CAP_SET_GUEST_DEBUG not supported");
- return 0;
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_GUEST_DEBUG));
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
/* Test software BPs - int3 */
- CLEAR_DEBUG();
+ memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == BP_VECTOR &&
run->debug.arch.pc == CAST_TO_RIP(sw_bp),
"INT3: exit %d exception %d rip 0x%llx (should be 0x%llx)",
run->exit_reason, run->debug.arch.exception,
run->debug.arch.pc, CAST_TO_RIP(sw_bp));
- MOVE_RIP(1);
+ vcpu_skip_insn(vcpu, 1);
/* Test instruction HW BP over DR[0-3] */
for (i = 0; i < 4; i++) {
- CLEAR_DEBUG();
+ memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | (1UL << i);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
@@ -137,17 +133,17 @@ int main(void)
run->debug.arch.dr6, target_dr6);
}
/* Skip "nop" */
- MOVE_RIP(1);
+ vcpu_skip_insn(vcpu, 1);
/* Test data access HW BP over DR[0-3] */
for (i = 0; i < 4; i++) {
- CLEAR_DEBUG();
+ memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
(0x000d0000UL << (4*i));
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | (1UL << i);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
@@ -159,23 +155,22 @@ int main(void)
run->debug.arch.pc, CAST_TO_RIP(write_data),
run->debug.arch.dr6, target_dr6);
/* Rollback the 4-bytes "mov" */
- MOVE_RIP(-7);
+ vcpu_skip_insn(vcpu, -7);
}
/* Skip the 4-bytes "mov" */
- MOVE_RIP(7);
+ vcpu_skip_insn(vcpu, 7);
/* Test single step */
target_rip = CAST_TO_RIP(ss_start);
target_dr6 = 0xffff4ff0ULL;
- vcpu_regs_get(vm, VCPU_ID, &regs);
for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
target_rip += ss_size[i];
- CLEAR_DEBUG();
+ memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
KVM_GUESTDBG_BLOCKIRQ;
debug.arch.debugreg[7] = 0x00000400;
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
run->debug.arch.pc == target_rip &&
@@ -188,11 +183,11 @@ int main(void)
}
/* Finally test global disable */
- CLEAR_DEBUG();
+ memset(&debug, 0, sizeof(debug));
debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
debug.arch.debugreg[7] = 0x400 | DR7_GD;
- APPLY_DEBUG();
- vcpu_run(vm, VCPU_ID);
+ vcpu_guest_debug_set(vcpu, &debug);
+ vcpu_run(vcpu);
target_dr6 = 0xffff0ff0 | DR6_BD;
TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
run->debug.arch.exception == DB_VECTOR &&
@@ -205,12 +200,12 @@ int main(void)
target_dr6);
/* Disable all debug controls, run to the end */
- CLEAR_DEBUG();
- APPLY_DEBUG();
+ memset(&debug, 0, sizeof(debug));
+ vcpu_guest_debug_set(vcpu, &debug);
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO");
- cmd = get_ucall(vm, VCPU_ID, &uc);
+ cmd = get_ucall(vcpu, &uc);
TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/emulator_error_test.c b/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
index aeb3850f81bd..236e11755ba6 100644
--- a/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
+++ b/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
@@ -11,7 +11,6 @@
#include "kvm_util.h"
#include "vmx.h"
-#define VCPU_ID 1
#define MAXPHYADDR 36
#define MEM_REGION_GVA 0x0000123456789000
@@ -27,14 +26,6 @@ static void guest_code(void)
GUEST_DONE();
}
-static void run_guest(struct kvm_vm *vm)
-{
- int rc;
-
- rc = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
-}
-
/*
* Accessors to get R/M, REG, and Mod bits described in the SDM vol 2,
* figure 2-2 "Table Interpretation of ModR/M Byte (C8H)".
@@ -56,9 +47,9 @@ static bool is_flds(uint8_t *insn_bytes, uint8_t insn_size)
GET_RM(insn_bytes[1]) != 0x5;
}
-static void process_exit_on_emulation_error(struct kvm_vm *vm)
+static void process_exit_on_emulation_error(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
struct kvm_regs regs;
uint8_t *insn_bytes;
uint8_t insn_size;
@@ -92,50 +83,48 @@ static void process_exit_on_emulation_error(struct kvm_vm *vm)
* contained an flds instruction that is 2-bytes in
* length (ie: no prefix, no SIB, no displacement).
*/
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
regs.rip += 2;
- vcpu_regs_set(vm, VCPU_ID, &regs);
+ vcpu_regs_set(vcpu, &regs);
}
}
}
-static void do_guest_assert(struct kvm_vm *vm, struct ucall *uc)
+static void do_guest_assert(struct ucall *uc)
{
- TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0], __FILE__,
- uc->args[1]);
+ REPORT_GUEST_ASSERT(*uc);
}
-static void check_for_guest_assert(struct kvm_vm *vm)
+static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct ucall uc;
- if (run->exit_reason == KVM_EXIT_IO &&
- get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
- do_guest_assert(vm, &uc);
+ if (vcpu->run->exit_reason == KVM_EXIT_IO &&
+ get_ucall(vcpu, &uc) == UCALL_ABORT) {
+ do_guest_assert(&uc);
}
}
-static void process_ucall_done(struct kvm_vm *vm)
+static void process_ucall_done(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
- check_for_guest_assert(vm);
+ check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)",
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
+ TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
"Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
uc.cmd, UCALL_DONE);
}
-static uint64_t process_ucall(struct kvm_vm *vm)
+static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
@@ -143,14 +132,14 @@ static uint64_t process_ucall(struct kvm_vm *vm)
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
break;
case UCALL_ABORT:
- do_guest_assert(vm, &uc);
+ do_guest_assert(&uc);
break;
case UCALL_DONE:
- process_ucall_done(vm);
+ process_ucall_done(vcpu);
break;
default:
TEST_ASSERT(false, "Unexpected ucall");
@@ -161,12 +150,7 @@ static uint64_t process_ucall(struct kvm_vm *vm)
int main(int argc, char *argv[])
{
- struct kvm_enable_cap emul_failure_cap = {
- .cap = KVM_CAP_EXIT_ON_EMULATION_FAILURE,
- .args[0] = 1,
- };
- struct kvm_cpuid_entry2 *entry;
- struct kvm_cpuid2 *cpuid;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t gpa, pte;
uint64_t *hva;
@@ -175,24 +159,15 @@ int main(int argc, char *argv[])
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- vm = vm_create_default(VCPU_ID, 0, guest_code);
-
- if (!kvm_check_cap(KVM_CAP_SMALLER_MAXPHYADDR)) {
- printf("module parameter 'allow_smaller_maxphyaddr' is not set. Skipping test.\n");
- return 0;
- }
-
- cpuid = kvm_get_supported_cpuid();
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));
- entry = kvm_get_supported_cpuid_index(0x80000008, 0);
- entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR;
- set_cpuid(cpuid, entry);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ vcpu_set_cpuid_maxphyaddr(vcpu, MAXPHYADDR);
rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
- vm_enable_cap(vm, &emul_failure_cap);
+ vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
MEM_REGION_GPA, MEM_REGION_SLOT,
@@ -203,14 +178,14 @@ int main(int argc, char *argv[])
virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
hva = addr_gpa2hva(vm, MEM_REGION_GPA);
memset(hva, 0, PAGE_SIZE);
- pte = vm_get_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA);
- vm_set_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA, pte | (1ull << 36));
+ pte = vm_get_page_table_entry(vm, vcpu, MEM_REGION_GVA);
+ vm_set_page_table_entry(vm, vcpu, MEM_REGION_GVA, pte | (1ull << 36));
- run_guest(vm);
- process_exit_on_emulation_error(vm);
- run_guest(vm);
+ vcpu_run(vcpu);
+ process_exit_on_emulation_error(vcpu);
+ vcpu_run(vcpu);
- TEST_ASSERT(process_ucall(vm) == UCALL_DONE, "Expected UCALL_DONE");
+ TEST_ASSERT(process_ucall(vcpu) == UCALL_DONE, "Expected UCALL_DONE");
kvm_vm_free(vm);
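
vm_enable_cap() above drops the caller-built struct kvm_enable_cap in favor of passing the capability and its argument directly. A sketch of the resulting idiom, using only the calls shown in this hunk:

/* Sketch only: probe-then-enable a VM capability with the new API. */
static void enable_emulation_error_exits(struct kvm_vm *vm)
{
	TEST_ASSERT(kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE),
		    "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
	vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);
}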
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index d12e043aa2ee..99bc202243d2 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -18,9 +18,6 @@
#include "vmx.h"
-#define VCPU_ID 5
-#define NMI_VECTOR 2
-
static int ud_count;
static void guest_ud_handler(struct ex_regs *regs)
@@ -160,88 +157,86 @@ void guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
-void inject_nmi(struct kvm_vm *vm)
+void inject_nmi(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_events events;
- vcpu_events_get(vm, VCPU_ID, &events);
+ vcpu_events_get(vcpu, &events);
events.nmi.pending = 1;
events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
- vcpu_events_set(vm, VCPU_ID, &events);
+ vcpu_events_set(vcpu, &events);
}
-static void save_restore_vm(struct kvm_vm *vm)
+static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
+ struct kvm_vcpu *vcpu)
{
struct kvm_regs regs1, regs2;
struct kvm_x86_state *state;
- state = vcpu_save_state(vm, VCPU_ID);
+ state = vcpu_save_state(vcpu);
memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ vcpu_regs_get(vcpu, &regs1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_hv_cpuid(vm, VCPU_ID);
- vcpu_enable_evmcs(vm, VCPU_ID);
- vcpu_load_state(vm, VCPU_ID, state);
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_set_hv_cpuid(vcpu);
+ vcpu_enable_evmcs(vcpu);
+ vcpu_load_state(vcpu, state);
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
+ vcpu_regs_get(vcpu, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
+ return vcpu;
}
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva = 0;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
int stage;
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- if (!nested_vmx_supported() ||
- !kvm_check_cap(KVM_CAP_NESTED_STATE) ||
- !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
- print_skip("Enlightened VMCS is unsupported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
- vcpu_set_hv_cpuid(vm, VCPU_ID);
- vcpu_enable_evmcs(vm, VCPU_ID);
+ vcpu_set_hv_cpuid(vcpu);
+ vcpu_enable_evmcs(vcpu);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
pr_info("Running L1 which uses EVMCS to run L2\n");
for (stage = 1;; stage++) {
- run = vcpu_state(vm, VCPU_ID);
- _vcpu_run(vm, VCPU_ID);
+ run = vcpu->run;
+
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
@@ -256,12 +251,12 @@ int main(int argc, char *argv[])
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
- save_restore_vm(vm);
+ vcpu = save_restore_vm(vm, vcpu);
/* Force immediate L2->L1 exit before resuming */
if (stage == 8) {
pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n");
- inject_nmi(vm);
+ inject_nmi(vcpu);
}
/*
@@ -271,7 +266,7 @@ int main(int argc, char *argv[])
*/
if (stage == 9) {
pr_info("Trying extra KVM_GET_NESTED_STATE/KVM_SET_NESTED_STATE cycle\n");
- save_restore_vm(vm);
+ vcpu = save_restore_vm(vm, vcpu);
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
new file mode 100644
index 000000000000..b1905d280ef5
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020, Google LLC.
+ *
+ * Tests for KVM paravirtual feature disablement
+ */
+#include <asm/kvm_para.h>
+#include <linux/kvm_para.h>
+#include <linux/stringify.h>
+#include <stdint.h>
+
+#include "apic.h"
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+static bool ud_expected;
+
+static void guest_ud_handler(struct ex_regs *regs)
+{
+ GUEST_ASSERT(ud_expected);
+ GUEST_DONE();
+}
+
+extern unsigned char svm_hypercall_insn;
+static uint64_t svm_do_sched_yield(uint8_t apic_id)
+{
+ uint64_t ret;
+
+ asm volatile("mov %1, %%rax\n\t"
+ "mov %2, %%rbx\n\t"
+ "svm_hypercall_insn:\n\t"
+ "vmmcall\n\t"
+ "mov %%rax, %0\n\t"
+ : "=r"(ret)
+ : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
+ : "rax", "rbx", "memory");
+
+ return ret;
+}
+
+extern unsigned char vmx_hypercall_insn;
+static uint64_t vmx_do_sched_yield(uint8_t apic_id)
+{
+ uint64_t ret;
+
+ asm volatile("mov %1, %%rax\n\t"
+ "mov %2, %%rbx\n\t"
+ "vmx_hypercall_insn:\n\t"
+ "vmcall\n\t"
+ "mov %%rax, %0\n\t"
+ : "=r"(ret)
+ : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
+ : "rax", "rbx", "memory");
+
+ return ret;
+}
+
+static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn)
+{
+ uint32_t exp = 0, obs = 0;
+
+ memcpy(&exp, exp_insn, sizeof(exp));
+ memcpy(&obs, obs_insn, sizeof(obs));
+
+ GUEST_ASSERT_EQ(exp, obs);
+}
+
+static void guest_main(void)
+{
+ unsigned char *native_hypercall_insn, *hypercall_insn;
+ uint8_t apic_id;
+
+ apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
+
+ if (is_intel_cpu()) {
+ native_hypercall_insn = &vmx_hypercall_insn;
+ hypercall_insn = &svm_hypercall_insn;
+ svm_do_sched_yield(apic_id);
+ } else if (is_amd_cpu()) {
+ native_hypercall_insn = &svm_hypercall_insn;
+ hypercall_insn = &vmx_hypercall_insn;
+ vmx_do_sched_yield(apic_id);
+ } else {
+ GUEST_ASSERT(0);
+ /* unreachable */
+ return;
+ }
+
+ GUEST_ASSERT(!ud_expected);
+ assert_hypercall_insn(native_hypercall_insn, hypercall_insn);
+ GUEST_DONE();
+}
+
+static void setup_ud_vector(struct kvm_vcpu *vcpu)
+{
+ vm_init_descriptor_tables(vcpu->vm);
+ vcpu_init_descriptor_tables(vcpu);
+ vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
+}
+
+static void enter_guest(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ struct ucall uc;
+
+ vcpu_run(vcpu);
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
+ break;
+ case UCALL_DONE:
+ return;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ default:
+ TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
+ uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
+ }
+}
+
+static void test_fix_hypercall(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
+ setup_ud_vector(vcpu);
+
+ ud_expected = false;
+ sync_global_to_guest(vm, ud_expected);
+
+ virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
+
+ enter_guest(vcpu);
+}
+
+static void test_fix_hypercall_disabled(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
+ setup_ud_vector(vcpu);
+
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
+ KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
+
+ ud_expected = true;
+ sync_global_to_guest(vm, ud_expected);
+
+ virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
+
+ enter_guest(vcpu);
+}
+
+int main(void)
+{
+ TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
+
+ test_fix_hypercall();
+ test_fix_hypercall_disabled();
+}
diff --git a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
index 8aed0db1331d..d09b3cbcadc6 100644
--- a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
+++ b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
@@ -15,116 +15,21 @@
#include "kvm_util.h"
#include "processor.h"
-static int kvm_num_index_msrs(int kvm_fd, int nmsrs)
-{
- struct kvm_msr_list *list;
- int r;
-
- list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
- list->nmsrs = nmsrs;
- r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
- TEST_ASSERT(r == -1 && errno == E2BIG,
- "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
- r);
-
- r = list->nmsrs;
- free(list);
- return r;
-}
-
-static void test_get_msr_index(void)
-{
- int old_res, res, kvm_fd, r;
- struct kvm_msr_list *list;
-
- kvm_fd = open_kvm_dev_path_or_exit();
-
- old_res = kvm_num_index_msrs(kvm_fd, 0);
- TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
-
- if (old_res != 1) {
- res = kvm_num_index_msrs(kvm_fd, 1);
- TEST_ASSERT(res > 1, "Expecting nmsrs to be > 1");
- TEST_ASSERT(res == old_res, "Expecting nmsrs to be identical");
- }
-
- list = malloc(sizeof(*list) + old_res * sizeof(list->indices[0]));
- list->nmsrs = old_res;
- r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
-
- TEST_ASSERT(r == 0,
- "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST, r: %i",
- r);
- TEST_ASSERT(list->nmsrs == old_res, "Expecting nmsrs to be identical");
- free(list);
-
- close(kvm_fd);
-}
-
-static int kvm_num_feature_msrs(int kvm_fd, int nmsrs)
-{
- struct kvm_msr_list *list;
- int r;
-
- list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
- list->nmsrs = nmsrs;
- r = ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
- TEST_ASSERT(r == -1 && errno == E2BIG,
- "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST probe, r: %i",
- r);
-
- r = list->nmsrs;
- free(list);
- return r;
-}
-
-struct kvm_msr_list *kvm_get_msr_feature_list(int kvm_fd, int nmsrs)
-{
- struct kvm_msr_list *list;
- int r;
-
- list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
- list->nmsrs = nmsrs;
- r = ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
-
- TEST_ASSERT(r == 0,
- "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST, r: %i",
- r);
-
- return list;
-}
-
-static void test_get_msr_feature(void)
+int main(int argc, char *argv[])
{
- int res, old_res, i, kvm_fd;
- struct kvm_msr_list *feature_list;
+ const struct kvm_msr_list *feature_list;
+ int i;
- kvm_fd = open_kvm_dev_path_or_exit();
+ /*
+ * Skip the entire test if MSR_FEATURES isn't supported; other tests
+ * will cover the "regular" list of MSRs, and the coverage here is
+ * purely opportunistic, not interesting on its own.
+ */
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_GET_MSR_FEATURES));
- old_res = kvm_num_feature_msrs(kvm_fd, 0);
- TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
-
- if (old_res != 1) {
- res = kvm_num_feature_msrs(kvm_fd, 1);
- TEST_ASSERT(res > 1, "Expecting nmsrs to be > 1");
- TEST_ASSERT(res == old_res, "Expecting nmsrs to be identical");
- }
-
- feature_list = kvm_get_msr_feature_list(kvm_fd, old_res);
- TEST_ASSERT(old_res == feature_list->nmsrs,
- "Unmatching number of msr indexes");
+ (void)kvm_get_msr_index_list();
+ feature_list = kvm_get_feature_msr_index_list();
for (i = 0; i < feature_list->nmsrs; i++)
kvm_get_feature_msr(feature_list->indices[i]);
-
- free(feature_list);
- close(kvm_fd);
-}
-
-int main(int argc, char *argv[])
-{
- if (kvm_check_cap(KVM_CAP_GET_MSR_FEATURES))
- test_get_msr_feature();
-
- test_get_msr_index();
}
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
index e0b2bb1339b1..d576bc8ce823 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
@@ -44,7 +44,7 @@ static inline void nop_loop(void)
{
int i;
- for (i = 0; i < 1000000; i++)
+ for (i = 0; i < 100000000; i++)
asm volatile("nop");
}
@@ -56,12 +56,14 @@ static inline void check_tsc_msr_rdtsc(void)
tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
GUEST_ASSERT(tsc_freq > 0);
- /* First, check MSR-based clocksource */
+ /* For increased accuracy, take the mean of rdtsc() before and after rdmsr() */
r1 = rdtsc();
t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+ r1 = (r1 + rdtsc()) / 2;
nop_loop();
r2 = rdtsc();
t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+ r2 = (r2 + rdtsc()) / 2;
GUEST_ASSERT(r2 > r1 && t2 > t1);
@@ -171,22 +173,22 @@ static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_
GUEST_DONE();
}
-#define VCPU_ID 0
-
-static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
+static void host_check_tsc_msr_rdtsc(struct kvm_vcpu *vcpu)
{
u64 tsc_freq, r1, r2, t1, t2;
s64 delta_ns;
- tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
+ tsc_freq = vcpu_get_msr(vcpu, HV_X64_MSR_TSC_FREQUENCY);
TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
- /* First, check MSR-based clocksource */
+ /* For increased accuracy, take the mean of rdtsc() before and after the ioctl */
r1 = rdtsc();
- t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+ t1 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
+ r1 = (r1 + rdtsc()) / 2;
nop_loop();
r2 = rdtsc();
- t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+ t2 = vcpu_get_msr(vcpu, HV_X64_MSR_TIME_REF_COUNT);
+ r2 = (r2 + rdtsc()) / 2;
TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
@@ -203,36 +205,36 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
int main(void)
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
vm_vaddr_t tsc_page_gva;
int stage;
- vm = vm_create_default(VCPU_ID, 0, guest_main);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
+ run = vcpu->run;
- vcpu_set_hv_cpuid(vm, VCPU_ID);
+ vcpu_set_hv_cpuid(vcpu);
tsc_page_gva = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, tsc_page_gva), 0x0, getpagesize());
TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
"TSC page has to be page aligned\n");
- vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
+ vcpu_args_set(vcpu, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
- host_check_tsc_msr_rdtsc(vm);
+ host_check_tsc_msr_rdtsc(vcpu);
for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
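
The bracketing added above halves the error introduced by the slow read's latency: averaging rdtsc() samples taken immediately before and after centers the TSC timestamp on the moment the reference clock was read. A minimal stand-alone sketch of the same technique (illustrative only; clock_gettime() stands in for the rdmsr()/ioctl being bracketed):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <x86intrin.h>

static uint64_t ns_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t r1, r2, t1, t2;
	volatile int i;

	/* Center each TSC sample on the slow clock read it brackets. */
	r1 = __rdtsc();
	t1 = ns_now();
	r1 = (r1 + __rdtsc()) / 2;

	for (i = 0; i < 100000000; i++)	/* stand-in for nop_loop() */
		;

	r2 = __rdtsc();
	t2 = ns_now();
	r2 = (r2 + __rdtsc()) / 2;

	printf("TSC ticks: %llu over %llu ns\n",
	       (unsigned long long)(r2 - r1), (unsigned long long)(t2 - t1));
	return 0;
}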
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index 8c245ab2d98a..e804eb08dff9 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -20,8 +20,6 @@
#include "processor.h"
#include "vmx.h"
-#define VCPU_ID 0
-
static void guest_code(void)
{
}
@@ -45,7 +43,7 @@ static bool smt_possible(void)
return res;
}
-static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
+static void test_hv_cpuid(const struct kvm_cpuid2 *hv_cpuid_entries,
bool evmcs_expected)
{
int i;
@@ -58,7 +56,7 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
nent_expected, hv_cpuid_entries->nent);
for (i = 0; i < hv_cpuid_entries->nent; i++) {
- struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i];
+ const struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i];
TEST_ASSERT((entry->function >= 0x40000000) &&
(entry->function <= 0x40000082),
@@ -115,64 +113,62 @@ static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
}
}
-void test_hv_cpuid_e2big(struct kvm_vm *vm, bool system)
+void test_hv_cpuid_e2big(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
static struct kvm_cpuid2 cpuid = {.nent = 0};
int ret;
- if (!system)
- ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+ if (vcpu)
+ ret = __vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
else
- ret = _kvm_ioctl(vm, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+ ret = __kvm_ioctl(vm->kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
TEST_ASSERT(ret == -1 && errno == E2BIG,
"%s KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"
- " it should have: %d %d", system ? "KVM" : "vCPU", ret, errno);
+ " it should have: %d %d", !vcpu ? "KVM" : "vCPU", ret, errno);
}
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
- struct kvm_cpuid2 *hv_cpuid_entries;
+ const struct kvm_cpuid2 *hv_cpuid_entries;
+ struct kvm_vcpu *vcpu;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- if (!kvm_check_cap(KVM_CAP_HYPERV_CPUID)) {
- print_skip("KVM_CAP_HYPERV_CPUID not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_CPUID));
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
/* Test vCPU ioctl version */
- test_hv_cpuid_e2big(vm, false);
+ test_hv_cpuid_e2big(vm, vcpu);
- hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, VCPU_ID);
+ hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
test_hv_cpuid(hv_cpuid_entries, false);
- free(hv_cpuid_entries);
+ free((void *)hv_cpuid_entries);
- if (!nested_vmx_supported() ||
- !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+ if (!kvm_cpu_has(X86_FEATURE_VMX) ||
+ !kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
print_skip("Enlightened VMCS is unsupported");
goto do_sys;
}
- vcpu_enable_evmcs(vm, VCPU_ID);
- hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vm, VCPU_ID);
+ vcpu_enable_evmcs(vcpu);
+ hv_cpuid_entries = vcpu_get_supported_hv_cpuid(vcpu);
test_hv_cpuid(hv_cpuid_entries, true);
- free(hv_cpuid_entries);
+ free((void *)hv_cpuid_entries);
do_sys:
/* Test system ioctl version */
- if (!kvm_check_cap(KVM_CAP_SYS_HYPERV_CPUID)) {
+ if (!kvm_has_cap(KVM_CAP_SYS_HYPERV_CPUID)) {
print_skip("KVM_CAP_SYS_HYPERV_CPUID not supported");
goto out;
}
- test_hv_cpuid_e2big(vm, true);
+ test_hv_cpuid_e2big(vm, NULL);
hv_cpuid_entries = kvm_get_supported_hv_cpuid();
- test_hv_cpuid(hv_cpuid_entries, nested_vmx_supported());
+ test_hv_cpuid(hv_cpuid_entries, kvm_cpu_has(X86_FEATURE_VMX));
out:
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 672915ce73d8..79ab0152d281 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -13,78 +13,22 @@
#include "processor.h"
#include "hyperv.h"
-#define VCPU_ID 0
#define LINUX_OS_ID ((u64)0x8100 << 48)
-extern unsigned char rdmsr_start;
-extern unsigned char rdmsr_end;
-
-static u64 do_rdmsr(u32 idx)
-{
- u32 lo, hi;
-
- asm volatile("rdmsr_start: rdmsr;"
- "rdmsr_end:"
- : "=a"(lo), "=c"(hi)
- : "c"(idx));
-
- return (((u64) hi) << 32) | lo;
-}
-
-extern unsigned char wrmsr_start;
-extern unsigned char wrmsr_end;
-
-static void do_wrmsr(u32 idx, u64 val)
-{
- u32 lo, hi;
-
- lo = val;
- hi = val >> 32;
-
- asm volatile("wrmsr_start: wrmsr;"
- "wrmsr_end:"
- : : "a"(lo), "c"(idx), "d"(hi));
-}
-
-static int nr_gp;
-static int nr_ud;
-
-static inline u64 hypercall(u64 control, vm_vaddr_t input_address,
- vm_vaddr_t output_address)
-{
- u64 hv_status;
-
- asm volatile("mov %3, %%r8\n"
- "vmcall"
- : "=a" (hv_status),
- "+c" (control), "+d" (input_address)
- : "r" (output_address)
- : "cc", "memory", "r8", "r9", "r10", "r11");
-
- return hv_status;
-}
-
-static void guest_gp_handler(struct ex_regs *regs)
+static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address,
+ vm_vaddr_t output_address, uint64_t *hv_status)
{
- unsigned char *rip = (unsigned char *)regs->rip;
- bool r, w;
-
- r = rip == &rdmsr_start;
- w = rip == &wrmsr_start;
- GUEST_ASSERT(r || w);
-
- nr_gp++;
-
- if (r)
- regs->rip = (uint64_t)&rdmsr_end;
- else
- regs->rip = (uint64_t)&wrmsr_end;
-}
-
-static void guest_ud_handler(struct ex_regs *regs)
-{
- nr_ud++;
- regs->rip += 3;
+ uint8_t vector;
+
+	/* Note that both the hypercall and the "asm safe" wrapper clobber r9-r11. */
+ asm volatile("mov %[output_address], %%r8\n\t"
+ KVM_ASM_SAFE("vmcall")
+ : "=a" (*hv_status),
+ "+c" (control), "+d" (input_address),
+ KVM_ASM_SAFE_OUTPUTS(vector)
+ : [output_address] "r"(output_address)
+ : "cc", "memory", "r8", KVM_ASM_SAFE_CLOBBERS);
+ return vector;
}
struct msr_data {
@@ -102,111 +46,105 @@ struct hcall_data {
static void guest_msr(struct msr_data *msr)
{
- int i = 0;
-
- while (msr->idx) {
- WRITE_ONCE(nr_gp, 0);
- if (!msr->write)
- do_rdmsr(msr->idx);
- else
- do_wrmsr(msr->idx, msr->write_val);
+ uint64_t ignored;
+ uint8_t vector;
- if (msr->available)
- GUEST_ASSERT(READ_ONCE(nr_gp) == 0);
- else
- GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
+ GUEST_ASSERT(msr->idx);
- GUEST_SYNC(i++);
- }
+ if (!msr->write)
+ vector = rdmsr_safe(msr->idx, &ignored);
+ else
+ vector = wrmsr_safe(msr->idx, msr->write_val);
+ if (msr->available)
+ GUEST_ASSERT_2(!vector, msr->idx, vector);
+ else
+ GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
GUEST_DONE();
}
static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
{
- int i = 0;
u64 res, input, output;
+ uint8_t vector;
+
+ GUEST_ASSERT(hcall->control);
wrmsr(HV_X64_MSR_GUEST_OS_ID, LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
- while (hcall->control) {
- nr_ud = 0;
- if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
- input = pgs_gpa;
- output = pgs_gpa + 4096;
- } else {
- input = output = 0;
- }
-
- res = hypercall(hcall->control, input, output);
- if (hcall->ud_expected)
- GUEST_ASSERT(nr_ud == 1);
- else
- GUEST_ASSERT(res == hcall->expect);
-
- GUEST_SYNC(i++);
+ if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
+ input = pgs_gpa;
+ output = pgs_gpa + 4096;
+ } else {
+ input = output = 0;
}
+ vector = hypercall(hcall->control, input, output, &res);
+ if (hcall->ud_expected)
+ GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
+ else
+ GUEST_ASSERT_2(!vector, hcall->control, vector);
+
+	GUEST_ASSERT_2(hcall->ud_expected || res == hcall->expect,
+		       hcall->expect, res);
GUEST_DONE();
}
-static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
- struct kvm_cpuid_entry2 *feat,
- struct kvm_cpuid_entry2 *recomm,
- struct kvm_cpuid_entry2 *dbg)
+static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
{
- TEST_ASSERT(set_cpuid(cpuid, feat),
- "failed to set KVM_CPUID_FEATURES leaf");
- TEST_ASSERT(set_cpuid(cpuid, recomm),
- "failed to set HYPERV_CPUID_ENLIGHTMENT_INFO leaf");
- TEST_ASSERT(set_cpuid(cpuid, dbg),
- "failed to set HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES leaf");
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
+ /*
+	 * Enable all supported Hyper-V features, then clear the leaves holding
+ * the features that will be tested one by one.
+ */
+ vcpu_set_hv_cpuid(vcpu);
+
+ vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
+ vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
+ vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
}
static void guest_test_msrs_access(void)
{
+ struct kvm_cpuid2 *prev_cpuid = NULL;
+ struct kvm_cpuid_entry2 *feat, *dbg;
+ struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
- int stage = 0, r;
- struct kvm_cpuid_entry2 feat = {
- .function = HYPERV_CPUID_FEATURES
- };
- struct kvm_cpuid_entry2 recomm = {
- .function = HYPERV_CPUID_ENLIGHTMENT_INFO
- };
- struct kvm_cpuid_entry2 dbg = {
- .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
- };
- struct kvm_cpuid2 *best;
+ int stage = 0;
vm_vaddr_t msr_gva;
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
- .args = {1}
- };
struct msr_data *msr;
while (true) {
- vm = vm_create_default(VCPU_ID, 0, guest_msr);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_msr);
msr_gva = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
msr = addr_gva2hva(vm, msr_gva);
- vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ vcpu_args_set(vcpu, 1, msr_gva);
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
- vcpu_set_hv_cpuid(vm, VCPU_ID);
+ if (!prev_cpuid) {
+ vcpu_reset_hv_cpuid(vcpu);
- best = kvm_get_supported_hv_cpuid();
+ prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
+ } else {
+ vcpu_init_cpuid(vcpu, prev_cpuid);
+ }
+
+ feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
+ dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+ vcpu_init_descriptor_tables(vcpu);
- run = vcpu_state(vm, VCPU_ID);
+ run = vcpu->run;
+
+ /* TODO: Make this entire test easier to maintain. */
+ if (stage >= 21)
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);
switch (stage) {
case 0:
@@ -223,7 +161,7 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 2:
- feat.eax |= HV_MSR_HYPERCALL_AVAILABLE;
+ feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
/*
* HV_X64_MSR_GUEST_OS_ID has to be written first to make
* HV_X64_MSR_HYPERCALL available.
@@ -250,12 +188,14 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 6:
- feat.eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
+ feat->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
+ msr->idx = HV_X64_MSR_VP_RUNTIME;
msr->write = 0;
msr->available = 1;
break;
case 7:
/* Read only */
+ msr->idx = HV_X64_MSR_VP_RUNTIME;
msr->write = 1;
msr->write_val = 1;
msr->available = 0;
@@ -267,12 +207,14 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 9:
- feat.eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ feat->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ msr->idx = HV_X64_MSR_TIME_REF_COUNT;
msr->write = 0;
msr->available = 1;
break;
case 10:
/* Read only */
+ msr->idx = HV_X64_MSR_TIME_REF_COUNT;
msr->write = 1;
msr->write_val = 1;
msr->available = 0;
@@ -284,12 +226,14 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 12:
- feat.eax |= HV_MSR_VP_INDEX_AVAILABLE;
+ feat->eax |= HV_MSR_VP_INDEX_AVAILABLE;
+ msr->idx = HV_X64_MSR_VP_INDEX;
msr->write = 0;
msr->available = 1;
break;
case 13:
/* Read only */
+ msr->idx = HV_X64_MSR_VP_INDEX;
msr->write = 1;
msr->write_val = 1;
msr->available = 0;
@@ -301,11 +245,13 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 15:
- feat.eax |= HV_MSR_RESET_AVAILABLE;
+ feat->eax |= HV_MSR_RESET_AVAILABLE;
+ msr->idx = HV_X64_MSR_RESET;
msr->write = 0;
msr->available = 1;
break;
case 16:
+ msr->idx = HV_X64_MSR_RESET;
msr->write = 1;
msr->write_val = 0;
msr->available = 1;
@@ -317,11 +263,13 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 18:
- feat.eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
+ feat->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
+ msr->idx = HV_X64_MSR_REFERENCE_TSC;
msr->write = 0;
msr->available = 1;
break;
case 19:
+ msr->idx = HV_X64_MSR_REFERENCE_TSC;
msr->write = 1;
msr->write_val = 0;
msr->available = 1;
@@ -337,16 +285,18 @@ static void guest_test_msrs_access(void)
* Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
* capability enabled and guest visible CPUID bit unset.
*/
- cap.cap = KVM_CAP_HYPERV_SYNIC2;
- cap.args[0] = 0;
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ msr->idx = HV_X64_MSR_EOM;
+ msr->write = 0;
+ msr->available = 0;
break;
case 22:
- feat.eax |= HV_MSR_SYNIC_AVAILABLE;
+ feat->eax |= HV_MSR_SYNIC_AVAILABLE;
+ msr->idx = HV_X64_MSR_EOM;
msr->write = 0;
msr->available = 1;
break;
case 23:
+ msr->idx = HV_X64_MSR_EOM;
msr->write = 1;
msr->write_val = 0;
msr->available = 1;
@@ -358,23 +308,29 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 25:
- feat.eax |= HV_MSR_SYNTIMER_AVAILABLE;
+ feat->eax |= HV_MSR_SYNTIMER_AVAILABLE;
+ msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = 0;
msr->available = 1;
break;
case 26:
+ msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = 1;
msr->write_val = 0;
msr->available = 1;
break;
case 27:
/* Direct mode test */
+ msr->idx = HV_X64_MSR_STIMER0_CONFIG;
msr->write = 1;
msr->write_val = 1 << 12;
msr->available = 0;
break;
case 28:
- feat.edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+ feat->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+ msr->idx = HV_X64_MSR_STIMER0_CONFIG;
+ msr->write = 1;
+ msr->write_val = 1 << 12;
msr->available = 1;
break;
@@ -384,7 +340,8 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 30:
- feat.eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
+ feat->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
+ msr->idx = HV_X64_MSR_EOI;
msr->write = 1;
msr->write_val = 1;
msr->available = 1;
@@ -396,12 +353,14 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 32:
- feat.eax |= HV_ACCESS_FREQUENCY_MSRS;
+ feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
+ msr->idx = HV_X64_MSR_TSC_FREQUENCY;
msr->write = 0;
msr->available = 1;
break;
case 33:
/* Read only */
+ msr->idx = HV_X64_MSR_TSC_FREQUENCY;
msr->write = 1;
msr->write_val = 1;
msr->available = 0;
@@ -413,11 +372,13 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 35:
- feat.eax |= HV_ACCESS_REENLIGHTENMENT;
+ feat->eax |= HV_ACCESS_REENLIGHTENMENT;
+ msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
msr->write = 0;
msr->available = 1;
break;
case 36:
+ msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
msr->write = 1;
msr->write_val = 1;
msr->available = 1;
@@ -436,11 +397,13 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 39:
- feat.edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ feat->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ msr->idx = HV_X64_MSR_CRASH_P0;
msr->write = 0;
msr->available = 1;
break;
case 40:
+ msr->idx = HV_X64_MSR_CRASH_P0;
msr->write = 1;
msr->write_val = 1;
msr->available = 1;
@@ -452,48 +415,44 @@ static void guest_test_msrs_access(void)
msr->available = 0;
break;
case 42:
- feat.edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
- dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ feat->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
+ dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ msr->idx = HV_X64_MSR_SYNDBG_STATUS;
msr->write = 0;
msr->available = 1;
break;
case 43:
+ msr->idx = HV_X64_MSR_SYNDBG_STATUS;
msr->write = 1;
msr->write_val = 0;
msr->available = 1;
break;
case 44:
- /* END */
- msr->idx = 0;
- break;
+ kvm_vm_free(vm);
+ return;
}
- hv_set_cpuid(vm, best, &feat, &recomm, &dbg);
+ vcpu_set_cpuid(vcpu);
+
+ memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
- if (msr->idx)
- pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
- msr->idx, msr->write ? "write" : "read");
- else
- pr_debug("Stage %d: finish\n", stage);
+ pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
+ msr->idx, msr->write ? "write" : "read");
- r = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_SYNC:
- TEST_ASSERT(uc.args[1] == 0,
- "Unexpected stage: %ld (0 expected)\n",
- uc.args[1]);
- break;
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT_2(uc, "MSR = %lx, vector = %lx");
return;
case UCALL_DONE:
+ break;
+ default:
+ TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
return;
}
@@ -504,54 +463,50 @@ static void guest_test_msrs_access(void)
static void guest_test_hcalls_access(void)
{
+ struct kvm_cpuid_entry2 *feat, *recomm, *dbg;
+ struct kvm_cpuid2 *prev_cpuid = NULL;
+ struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
- int stage = 0, r;
- struct kvm_cpuid_entry2 feat = {
- .function = HYPERV_CPUID_FEATURES,
- .eax = HV_MSR_HYPERCALL_AVAILABLE
- };
- struct kvm_cpuid_entry2 recomm = {
- .function = HYPERV_CPUID_ENLIGHTMENT_INFO
- };
- struct kvm_cpuid_entry2 dbg = {
- .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
- };
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
- .args = {1}
- };
+ int stage = 0;
vm_vaddr_t hcall_page, hcall_params;
struct hcall_data *hcall;
- struct kvm_cpuid2 *best;
while (true) {
- vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+ vcpu_init_descriptor_tables(vcpu);
/* Hypercall input/output */
hcall_page = vm_vaddr_alloc_pages(vm, 2);
- hcall = addr_gva2hva(vm, hcall_page);
memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
hcall_params = vm_vaddr_alloc_page(vm);
memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
+ hcall = addr_gva2hva(vm, hcall_params);
+
+ vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+ vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);
- vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ if (!prev_cpuid) {
+ vcpu_reset_hv_cpuid(vcpu);
- vcpu_set_hv_cpuid(vm, VCPU_ID);
+ prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
+ } else {
+ vcpu_init_cpuid(vcpu, prev_cpuid);
+ }
- best = kvm_get_supported_hv_cpuid();
+ feat = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
+ recomm = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
+ dbg = vcpu_get_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
- run = vcpu_state(vm, VCPU_ID);
+ run = vcpu->run;
switch (stage) {
case 0:
+ feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
hcall->control = 0xdeadbeef;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
@@ -561,7 +516,8 @@ static void guest_test_hcalls_access(void)
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 2:
- feat.ebx |= HV_POST_MESSAGES;
+ feat->ebx |= HV_POST_MESSAGES;
+ hcall->control = HVCALL_POST_MESSAGE;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
@@ -570,7 +526,8 @@ static void guest_test_hcalls_access(void)
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 4:
- feat.ebx |= HV_SIGNAL_EVENTS;
+ feat->ebx |= HV_SIGNAL_EVENTS;
+ hcall->control = HVCALL_SIGNAL_EVENT;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
@@ -579,11 +536,13 @@ static void guest_test_hcalls_access(void)
hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
case 6:
- dbg.eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ dbg->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ hcall->control = HVCALL_RESET_DEBUG_SESSION;
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 7:
- feat.ebx |= HV_DEBUGGING;
+ feat->ebx |= HV_DEBUGGING;
+ hcall->control = HVCALL_RESET_DEBUG_SESSION;
hcall->expect = HV_STATUS_OPERATION_DENIED;
break;
@@ -592,7 +551,8 @@ static void guest_test_hcalls_access(void)
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 9:
- recomm.eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ recomm->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
hcall->expect = HV_STATUS_SUCCESS;
break;
case 10:
@@ -600,7 +560,8 @@ static void guest_test_hcalls_access(void)
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 11:
- recomm.eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+ recomm->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+ hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
hcall->expect = HV_STATUS_SUCCESS;
break;
@@ -609,7 +570,8 @@ static void guest_test_hcalls_access(void)
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 13:
- recomm.eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+ recomm->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+ hcall->control = HVCALL_SEND_IPI;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
case 14:
@@ -623,7 +585,8 @@ static void guest_test_hcalls_access(void)
hcall->expect = HV_STATUS_ACCESS_DENIED;
break;
case 16:
- recomm.ebx = 0xfff;
+ recomm->ebx = 0xfff;
+ hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
hcall->expect = HV_STATUS_SUCCESS;
break;
case 17:
@@ -632,42 +595,35 @@ static void guest_test_hcalls_access(void)
hcall->ud_expected = true;
break;
case 18:
- feat.edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+ feat->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
+ hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
hcall->ud_expected = false;
hcall->expect = HV_STATUS_SUCCESS;
break;
-
case 19:
- /* END */
- hcall->control = 0;
- break;
+ kvm_vm_free(vm);
+ return;
}
- hv_set_cpuid(vm, best, &feat, &recomm, &dbg);
+ vcpu_set_cpuid(vcpu);
+
+ memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));
- if (hcall->control)
- pr_debug("Stage %d: testing hcall: 0x%lx\n", stage,
- hcall->control);
- else
- pr_debug("Stage %d: finish\n", stage);
+ pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);
- r = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
- case UCALL_SYNC:
- TEST_ASSERT(uc.args[1] == 0,
- "Unexpected stage: %ld (0 expected)\n",
- uc.args[1]);
- break;
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
return;
case UCALL_DONE:
+ break;
+ default:
+ TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
return;
}
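
Both loops above persist the vCPU's CPUID into prev_cpuid by copying kvm_cpuid2_size(nent) bytes, so each iteration inherits the feature bits enabled by the previous one. A hedged sketch of that snapshot pattern against the raw uapi structures (kvm_cpuid2 ends in a flexible array, so the copy must cover the entries as well as the header):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <linux/kvm.h>

/* Byte size of a kvm_cpuid2 with nent entries (flexible array member). */
static size_t cpuid2_size(uint32_t nent)
{
	return sizeof(struct kvm_cpuid2) +
	       nent * sizeof(struct kvm_cpuid_entry2);
}

/* Deep-copy a CPUID array so a later iteration can be re-seeded from it. */
static struct kvm_cpuid2 *snapshot_cpuid(const struct kvm_cpuid2 *src)
{
	struct kvm_cpuid2 *copy = malloc(cpuid2_size(src->nent));

	memcpy(copy, src, cpuid2_size(src->nent));
	return copy;
}

int main(void)
{
	struct kvm_cpuid2 *orig = calloc(1, cpuid2_size(4));
	struct kvm_cpuid2 *snap;

	orig->nent = 4;
	orig->entries[0].function = 0x40000003;	/* HYPERV_CPUID_FEATURES */
	snap = snapshot_cpuid(orig);

	free(snap);
	free(orig);
	return 0;
}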
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index 21f5ca9197da..a380ad7bb9b3 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -21,7 +21,6 @@
#include "svm_util.h"
#include "hyperv.h"
-#define VCPU_ID 1
#define L2_GUEST_STACK_SIZE 256
struct hv_enlightenments {
@@ -42,11 +41,6 @@ struct hv_enlightenments {
*/
#define VMCB_HV_NESTED_ENLIGHTENMENTS (1U << 31)
-static inline void vmmcall(void)
-{
- __asm__ __volatile__("vmmcall");
-}
-
void l2_guest_code(void)
{
GUEST_SYNC(3);
@@ -127,33 +121,31 @@ int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
int stage;
- if (!nested_svm_supported()) {
- print_skip("Nested SVM not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
/* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- vcpu_set_hv_cpuid(vm, VCPU_ID);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vcpu_set_hv_cpuid(vcpu);
+ run = vcpu->run;
vcpu_alloc_svm(vm, &nested_gva);
- vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
index 97731454f3f3..813ce282cf56 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c
@@ -16,8 +16,6 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 0
-
struct test_case {
uint64_t kvmclock_base;
int64_t realtime_offset;
@@ -73,8 +71,7 @@ static void handle_sync(struct ucall *uc, struct kvm_clock_data *start,
static void handle_abort(struct ucall *uc)
{
- TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
- __FILE__, uc->args[1]);
+ REPORT_GUEST_ASSERT(*uc);
}
static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
@@ -105,29 +102,27 @@ static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
vm_ioctl(vm, KVM_SET_CLOCK, &data);
}
-static void enter_guest(struct kvm_vm *vm)
+static void enter_guest(struct kvm_vcpu *vcpu)
{
struct kvm_clock_data start, end;
- struct kvm_run *run;
+ struct kvm_run *run = vcpu->run;
+ struct kvm_vm *vm = vcpu->vm;
struct ucall uc;
- int i, r;
-
- run = vcpu_state(vm, VCPU_ID);
+ int i;
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
setup_clock(vm, &test_cases[i]);
vm_ioctl(vm, KVM_GET_CLOCK, &start);
- r = _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
vm_ioctl(vm, KVM_GET_CLOCK, &end);
- TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
handle_sync(&uc, &start, &end);
break;
@@ -178,26 +173,23 @@ out:
int main(void)
{
+ struct kvm_vcpu *vcpu;
vm_vaddr_t pvti_gva;
vm_paddr_t pvti_gpa;
struct kvm_vm *vm;
int flags;
flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
- if (!(flags & KVM_CLOCK_REALTIME)) {
- print_skip("KVM_CLOCK_REALTIME not supported; flags: %x",
- flags);
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);
check_clocksource();
- vm = vm_create_default(VCPU_ID, 0, guest_main);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000);
pvti_gpa = addr_gva2gpa(vm, pvti_gva);
- vcpu_args_set(vm, VCPU_ID, 2, pvti_gpa, pvti_gva);
+ vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva);
- enter_guest(vm);
+ enter_guest(vcpu);
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
index 04ed975662c9..619655c1a1f3 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
@@ -12,55 +12,6 @@
#include "kvm_util.h"
#include "processor.h"
-extern unsigned char rdmsr_start;
-extern unsigned char rdmsr_end;
-
-static u64 do_rdmsr(u32 idx)
-{
- u32 lo, hi;
-
- asm volatile("rdmsr_start: rdmsr;"
- "rdmsr_end:"
- : "=a"(lo), "=c"(hi)
- : "c"(idx));
-
- return (((u64) hi) << 32) | lo;
-}
-
-extern unsigned char wrmsr_start;
-extern unsigned char wrmsr_end;
-
-static void do_wrmsr(u32 idx, u64 val)
-{
- u32 lo, hi;
-
- lo = val;
- hi = val >> 32;
-
- asm volatile("wrmsr_start: wrmsr;"
- "wrmsr_end:"
- : : "a"(lo), "c"(idx), "d"(hi));
-}
-
-static int nr_gp;
-
-static void guest_gp_handler(struct ex_regs *regs)
-{
- unsigned char *rip = (unsigned char *)regs->rip;
- bool r, w;
-
- r = rip == &rdmsr_start;
- w = rip == &wrmsr_start;
- GUEST_ASSERT(r || w);
-
- nr_gp++;
-
- if (r)
- regs->rip = (uint64_t)&rdmsr_end;
- else
- regs->rip = (uint64_t)&wrmsr_end;
-}
-
struct msr_data {
uint32_t idx;
const char *name;
@@ -89,14 +40,16 @@ static struct msr_data msrs_to_test[] = {
static void test_msr(struct msr_data *msr)
{
+ uint64_t ignored;
+ uint8_t vector;
+
PR_MSR(msr);
- do_rdmsr(msr->idx);
- GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
- nr_gp = 0;
- do_wrmsr(msr->idx, 0);
- GUEST_ASSERT(READ_ONCE(nr_gp) == 1);
- nr_gp = 0;
+ vector = rdmsr_safe(msr->idx, &ignored);
+ GUEST_ASSERT_1(vector == GP_VECTOR, vector);
+
+ vector = wrmsr_safe(msr->idx, 0);
+ GUEST_ASSERT_1(vector == GP_VECTOR, vector);
}
struct hcall_data {
@@ -142,15 +95,6 @@ static void guest_main(void)
GUEST_DONE();
}
-static void clear_kvm_cpuid_features(struct kvm_cpuid2 *cpuid)
-{
- struct kvm_cpuid_entry2 ent = {0};
-
- ent.function = KVM_CPUID_FEATURES;
- TEST_ASSERT(set_cpuid(cpuid, &ent),
- "failed to clear KVM_CPUID_FEATURES leaf");
-}
-
static void pr_msr(struct ucall *uc)
{
struct msr_data *msr = (struct msr_data *)uc->args[0];
@@ -165,30 +109,18 @@ static void pr_hcall(struct ucall *uc)
pr_info("testing hcall: %s (%lu)\n", hc->name, hc->nr);
}
-static void handle_abort(struct ucall *uc)
+static void enter_guest(struct kvm_vcpu *vcpu)
{
- TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
- __FILE__, uc->args[1]);
-}
-
-#define VCPU_ID 0
-
-static void enter_guest(struct kvm_vm *vm)
-{
- struct kvm_run *run;
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
- int r;
-
- run = vcpu_state(vm, VCPU_ID);
while (true) {
- r = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"unexpected exit reason: %u (%s)",
run->exit_reason, exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_PR_MSR:
pr_msr(&uc);
break;
@@ -196,7 +128,7 @@ static void enter_guest(struct kvm_vm *vm)
pr_hcall(&uc);
break;
case UCALL_ABORT:
- handle_abort(&uc);
+ REPORT_GUEST_ASSERT_1(uc, "vector = %lu");
return;
case UCALL_DONE:
return;
@@ -206,29 +138,20 @@ static void enter_guest(struct kvm_vm *vm)
int main(void)
{
- struct kvm_enable_cap cap = {0};
- struct kvm_cpuid2 *best;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- if (!kvm_check_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID)) {
- print_skip("KVM_CAP_ENFORCE_PV_FEATURE_CPUID not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ENFORCE_PV_FEATURE_CPUID));
- vm = vm_create_default(VCPU_ID, 0, guest_main);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_main);
- cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
- cap.args[0] = 1;
- vcpu_enable_cap(vm, VCPU_ID, &cap);
+ vcpu_enable_cap(vcpu, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 1);
- best = kvm_get_supported_cpuid();
- clear_kvm_cpuid_features(best);
- vcpu_set_cpuid(vm, VCPU_ID, best);
+ vcpu_clear_cpuid_entry(vcpu, KVM_CPUID_FEATURES);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+ vcpu_init_descriptor_tables(vcpu);
- enter_guest(vm);
+ enter_guest(vcpu);
kvm_vm_free(vm);
}
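
The reworked guest code relies on the selftest library's rdmsr_safe()/wrmsr_safe(), which return the fault vector instead of crashing the guest. As a loose host-side analogue (illustrative only, not part of the test): Linux's msr driver surfaces a faulting RDMSR as an EIO error on pread(), so userspace can probe an MSR safely. The index below is MSR_KVM_SYSTEM_TIME_NEW (0x4b564d01), one of the PV MSRs this test hides:

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	/* Requires root and the msr module (CONFIG_X86_MSR). */
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	/* A faulting RDMSR surfaces as pread() failing with EIO. */
	if (pread(fd, &val, sizeof(val), 0x4b564d01) != sizeof(val))
		printf("rdmsr faulted: %s\n", strerror(errno));
	else
		printf("MSR_KVM_SYSTEM_TIME_NEW = 0x%llx\n",
		       (unsigned long long)val);

	close(fd);
	return 0;
}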
diff --git a/tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c b/tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c
new file mode 100644
index 000000000000..3cc4b86832fe
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * maximum APIC ID capability tests
+ *
+ * Copyright (C) 2022, Intel, Inc.
+ *
+ * Tests for getting/setting maximum APIC ID capability
+ */
+
+#include "kvm_util.h"
+
+#define MAX_VCPU_ID 2
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ int ret;
+
+ vm = vm_create_barebones();
+
+ /* Get KVM_CAP_MAX_VCPU_ID cap supported in KVM */
+ ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);
+
+ /* Try to set KVM_CAP_MAX_VCPU_ID beyond KVM cap */
+ ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, ret + 1);
+ TEST_ASSERT(ret < 0,
+ "Setting KVM_CAP_MAX_VCPU_ID beyond KVM cap should fail");
+
+ /* Set KVM_CAP_MAX_VCPU_ID */
+ vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID);
+
+
+ /* Try to set KVM_CAP_MAX_VCPU_ID again */
+ ret = __vm_enable_cap(vm, KVM_CAP_MAX_VCPU_ID, MAX_VCPU_ID + 1);
+ TEST_ASSERT(ret < 0,
+ "Setting KVM_CAP_MAX_VCPU_ID multiple times should fail");
+
+	/* Create vCPU with id beyond KVM_CAP_MAX_VCPU_ID cap */
+ ret = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)MAX_VCPU_ID);
+ TEST_ASSERT(ret < 0, "Creating vCPU with ID > MAX_VCPU_ID should fail");
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
index 9f55ccd169a1..fb02581953a3 100644
--- a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
+++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
@@ -59,10 +59,10 @@ void test(void)
kvm = open("/dev/kvm", O_RDWR);
TEST_ASSERT(kvm != -1, "failed to open /dev/kvm");
- kvmvm = ioctl(kvm, KVM_CREATE_VM, 0);
- TEST_ASSERT(kvmvm != -1, "KVM_CREATE_VM failed");
+ kvmvm = __kvm_ioctl(kvm, KVM_CREATE_VM, NULL);
+ TEST_ASSERT(kvmvm > 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, kvmvm));
kvmcpu = ioctl(kvmvm, KVM_CREATE_VCPU, 0);
- TEST_ASSERT(kvmcpu != -1, "KVM_CREATE_VCPU failed");
+ TEST_ASSERT(kvmcpu != -1, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, kvmcpu));
run = (struct kvm_run *)mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED,
kvmcpu, 0);
tc.kvmcpu = kvmcpu;
@@ -93,15 +93,9 @@ int main(void)
{
int warnings_before, warnings_after;
- if (!is_intel_cpu()) {
- print_skip("Must be run on an Intel CPU");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(is_intel_cpu());
- if (vm_is_unrestricted_guest(NULL)) {
- print_skip("Unrestricted guest must be disabled");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
warnings_before = get_warnings_count();
diff --git a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c b/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
deleted file mode 100644
index da2325fcad87..000000000000
--- a/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include "kvm_util.h"
-#include "processor.h"
-
-#define VCPU_ID 1
-
-#define MMIO_GPA 0x100000000ull
-
-static void guest_code(void)
-{
- (void)READ_ONCE(*((uint64_t *)MMIO_GPA));
- (void)READ_ONCE(*((uint64_t *)MMIO_GPA));
-
- GUEST_ASSERT(0);
-}
-
-static void guest_pf_handler(struct ex_regs *regs)
-{
- /* PFEC == RSVD | PRESENT (read, kernel). */
- GUEST_ASSERT(regs->error_code == 0x9);
- GUEST_DONE();
-}
-
-static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
-{
- u32 good_cpuid_val = *cpuid_reg;
- struct kvm_run *run;
- struct kvm_vm *vm;
- uint64_t cmd;
- int r;
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
-
- /* Map 1gb page without a backing memlot. */
- __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G);
-
- r = _vcpu_run(vm, VCPU_ID);
-
- /* Guest access to the 1gb page should trigger MMIO. */
- TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
- TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
- "Unexpected exit reason: %u (%s), expected MMIO exit (1gb page w/o memslot)\n",
- run->exit_reason, exit_reason_str(run->exit_reason));
-
- TEST_ASSERT(run->mmio.len == 8, "Unexpected exit mmio size = %u", run->mmio.len);
-
- TEST_ASSERT(run->mmio.phys_addr == MMIO_GPA,
- "Unexpected exit mmio address = 0x%llx", run->mmio.phys_addr);
-
- /*
- * Effect the CPUID change for the guest and re-enter the guest. Its
- * access should now #PF due to the PAGE_SIZE bit being reserved or
- * the resulting GPA being invalid. Note, kvm_get_supported_cpuid()
- * returns the struct that contains the entry being modified. Eww.
- */
- *cpuid_reg = evil_cpuid_val;
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
- /*
- * Add a dummy memslot to coerce KVM into bumping the MMIO generation.
- * KVM does not "officially" support mucking with CPUID after KVM_RUN,
- * and will incorrectly reuse MMIO SPTEs. Don't delete the memslot!
- * KVM x86 zaps all shadow pages on memslot deletion.
- */
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- MMIO_GPA << 1, 10, 1, 0);
-
- /* Set up a #PF handler to eat the RSVD #PF and signal all done! */
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);
-
- r = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
-
- cmd = get_ucall(vm, VCPU_ID, NULL);
- TEST_ASSERT(cmd == UCALL_DONE,
- "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
- exit_reason_str(run->exit_reason), cmd);
-
- /*
- * Restore the happy CPUID value for the next test. Yes, changes are
- * indeed persistent across VM destruction.
- */
- *cpuid_reg = good_cpuid_val;
-
- kvm_vm_free(vm);
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_cpuid_entry2 *entry;
- int opt;
-
- /*
- * All tests are opt-in because TDP doesn't play nice with reserved #PF
- * in the GVA->GPA translation. The hardware page walker doesn't let
- * software change GBPAGES or MAXPHYADDR, and KVM doesn't manually walk
- * the GVA on fault for performance reasons.
- */
- bool do_gbpages = false;
- bool do_maxphyaddr = false;
-
- setbuf(stdout, NULL);
-
- while ((opt = getopt(argc, argv, "gm")) != -1) {
- switch (opt) {
- case 'g':
- do_gbpages = true;
- break;
- case 'm':
- do_maxphyaddr = true;
- break;
- case 'h':
- default:
- printf("usage: %s [-g (GBPAGES)] [-m (MAXPHYADDR)]\n", argv[0]);
- break;
- }
- }
-
- if (!do_gbpages && !do_maxphyaddr) {
- print_skip("No sub-tests selected");
- return 0;
- }
-
- entry = kvm_get_supported_cpuid_entry(0x80000001);
- if (!(entry->edx & CPUID_GBPAGES)) {
- print_skip("1gb hugepages not supported");
- return 0;
- }
-
- if (do_gbpages) {
- pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
- mmu_role_test(&entry->edx, entry->edx & ~CPUID_GBPAGES);
- }
-
- if (do_maxphyaddr) {
- pr_info("Test MMIO after changing CPUID.MAXPHYADDR\n\n");
- entry = kvm_get_supported_cpuid_entry(0x80000008);
- mmu_role_test(&entry->eax, (entry->eax & ~0xff) | 0x20);
- }
-
- return 0;
-}
diff --git a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
new file mode 100644
index 000000000000..016070cad36e
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define CPUID_MWAIT (1u << 3)
+
+enum monitor_mwait_testcases {
+ MWAIT_QUIRK_DISABLED = BIT(0),
+ MISC_ENABLES_QUIRK_DISABLED = BIT(1),
+ MWAIT_DISABLED = BIT(2),
+};
+
+static void guest_monitor_wait(int testcase)
+{
+ /*
+	 * If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD;
+ * in all other scenarios KVM should emulate them as nops.
+ */
+ bool fault_wanted = (testcase & MWAIT_QUIRK_DISABLED) &&
+ (testcase & MWAIT_DISABLED);
+ u8 vector;
+
+ GUEST_SYNC(testcase);
+
+ /*
+ * Arbitrarily MONITOR this function, SVM performs fault checks before
+ * intercept checks, so the inputs for MONITOR and MWAIT must be valid.
+ */
+ vector = kvm_asm_safe("monitor", "a"(guest_monitor_wait), "c"(0), "d"(0));
+ if (fault_wanted)
+ GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector);
+ else
+ GUEST_ASSERT_2(!vector, testcase, vector);
+
+ vector = kvm_asm_safe("mwait", "a"(guest_monitor_wait), "c"(0), "d"(0));
+ if (fault_wanted)
+ GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector);
+ else
+ GUEST_ASSERT_2(!vector, testcase, vector);
+}
+
+static void guest_code(void)
+{
+ guest_monitor_wait(MWAIT_DISABLED);
+
+ guest_monitor_wait(MWAIT_QUIRK_DISABLED | MWAIT_DISABLED);
+
+ guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_DISABLED);
+ guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED);
+
+ guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_QUIRK_DISABLED | MWAIT_DISABLED);
+ guest_monitor_wait(MISC_ENABLES_QUIRK_DISABLED | MWAIT_QUIRK_DISABLED);
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ uint64_t disabled_quirks;
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ int testcase;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_DISABLE_QUIRKS2));
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MWAIT);
+
+ run = vcpu->run;
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
+
+ while (1) {
+ vcpu_run(vcpu);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ testcase = uc.args[1];
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT_2(uc, "testcase = %lx, vector = %ld");
+ goto done;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ goto done;
+ }
+
+ disabled_quirks = 0;
+ if (testcase & MWAIT_QUIRK_DISABLED)
+ disabled_quirks |= KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS;
+ if (testcase & MISC_ENABLES_QUIRK_DISABLED)
+ disabled_quirks |= KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT;
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2, disabled_quirks);
+
+ /*
+ * If the MISC_ENABLES quirk (KVM neglects to update CPUID to
+ * enable/disable MWAIT) is disabled, toggle the ENABLE_MWAIT
+ * bit in MISC_ENABLES accordingly. If the quirk is enabled,
+ * the only valid configuration is MWAIT disabled, as CPUID
+ * can't be manually changed after running the vCPU.
+ */
+ if (!(testcase & MISC_ENABLES_QUIRK_DISABLED)) {
+ TEST_ASSERT(testcase & MWAIT_DISABLED,
+ "Can't toggle CPUID features after running vCPU");
+ continue;
+ }
+
+ vcpu_set_msr(vcpu, MSR_IA32_MISC_ENABLE,
+ (testcase & MWAIT_DISABLED) ? 0 : MSR_IA32_MISC_ENABLE_MWAIT);
+ }
+
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
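
The quirk interactions above reduce to a single predicate: MONITOR/MWAIT #UDs only when both MWAIT itself and the MWAIT quirk are disabled, and every other combination is emulated as a nop. A tiny stand-alone restatement of that expectation (same bit definitions as the test):

#include <stdbool.h>
#include <stdio.h>

#define MWAIT_QUIRK_DISABLED		(1u << 0)
#define MISC_ENABLES_QUIRK_DISABLED	(1u << 1)
#define MWAIT_DISABLED			(1u << 2)

/* MONITOR/MWAIT should #UD only when both the quirk and MWAIT are off. */
static bool fault_wanted(unsigned int testcase)
{
	return (testcase & MWAIT_QUIRK_DISABLED) &&
	       (testcase & MWAIT_DISABLED);
}

int main(void)
{
	unsigned int tc;

	/* The MISC_ENABLES bit never changes the outcome. */
	for (tc = 0; tc < 8; tc++)
		printf("testcase %u: %s\n", tc,
		       fault_wanted(tc) ? "#UD" : "emulated as a nop");
	return 0;
}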
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
new file mode 100644
index 000000000000..cc6421716400
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+ *
+ * Usage: to be run via nx_huge_pages_test.sh, which does the necessary
+ * environment setup and teardown.
+ *
+ * Copyright (C) 2022, Google LLC.
+ */
+
+#define _GNU_SOURCE
+
+#include <fcntl.h>
+#include <stdint.h>
+#include <time.h>
+
+#include <test_util.h>
+#include "kvm_util.h"
+#include "processor.h"
+
+#define HPAGE_SLOT 10
+#define HPAGE_GPA (4UL << 30) /* 4G prevents collision w/ slot 0 */
+#define HPAGE_GVA HPAGE_GPA /* GVA is arbitrary, so use GPA. */
+#define PAGES_PER_2MB_HUGE_PAGE 512
+#define HPAGE_SLOT_NPAGES (3 * PAGES_PER_2MB_HUGE_PAGE)
+
+/*
+ * Passed by nx_huge_pages_test.sh to provide an easy warning if this test is
+ * being run without it.
+ */
+#define MAGIC_TOKEN 887563923
+
+/*
+ * x86 opcode for the return instruction. Used to call into, and then
+ * immediately return from, memory backed with hugepages.
+ */
+#define RETURN_OPCODE 0xC3
+
+/* Call the specified memory address. */
+static void guest_do_CALL(uint64_t target)
+{
+ ((void (*)(void)) target)();
+}
+
+/*
+ * Exit the VM after each memory access so that the userspace component of the
+ * test can make assertions about the pages backing the VM.
+ *
+ * See the below for an explanation of how each access should affect the
+ * backing mappings.
+ */
+void guest_code(void)
+{
+ uint64_t hpage_1 = HPAGE_GVA;
+ uint64_t hpage_2 = hpage_1 + (PAGE_SIZE * 512);
+ uint64_t hpage_3 = hpage_2 + (PAGE_SIZE * 512);
+
+ READ_ONCE(*(uint64_t *)hpage_1);
+ GUEST_SYNC(1);
+
+ READ_ONCE(*(uint64_t *)hpage_2);
+ GUEST_SYNC(2);
+
+ guest_do_CALL(hpage_1);
+ GUEST_SYNC(3);
+
+ guest_do_CALL(hpage_3);
+ GUEST_SYNC(4);
+
+ READ_ONCE(*(uint64_t *)hpage_1);
+ GUEST_SYNC(5);
+
+ READ_ONCE(*(uint64_t *)hpage_3);
+ GUEST_SYNC(6);
+}
+
+static void check_2m_page_count(struct kvm_vm *vm, int expected_pages_2m)
+{
+ int actual_pages_2m;
+
+ actual_pages_2m = vm_get_stat(vm, "pages_2m");
+
+ TEST_ASSERT(actual_pages_2m == expected_pages_2m,
+ "Unexpected 2m page count. Expected %d, got %d",
+ expected_pages_2m, actual_pages_2m);
+}
+
+static void check_split_count(struct kvm_vm *vm, int expected_splits)
+{
+ int actual_splits;
+
+ actual_splits = vm_get_stat(vm, "nx_lpage_splits");
+
+ TEST_ASSERT(actual_splits == expected_splits,
+ "Unexpected NX huge page split count. Expected %d, got %d",
+ expected_splits, actual_splits);
+}
+
+static void wait_for_reclaim(int reclaim_period_ms)
+{
+ long reclaim_wait_ms;
+ struct timespec ts;
+
+ reclaim_wait_ms = reclaim_period_ms * 5;
+ ts.tv_sec = reclaim_wait_ms / 1000;
+ ts.tv_nsec = (reclaim_wait_ms - (ts.tv_sec * 1000)) * 1000000;
+ nanosleep(&ts, NULL);
+}
+
+void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
+ bool reboot_permissions)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ void *hva;
+ int r;
+
+ vm = vm_create(1);
+
+ if (disable_nx_huge_pages) {
+ /*
+		 * Cannot run the test with NX huge pages disabled if the
+		 * kernel does not support disabling them.
+ */
+ if (!kvm_check_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES))
+ return;
+
+ r = __vm_disable_nx_huge_pages(vm);
+ if (reboot_permissions) {
+ TEST_ASSERT(!r, "Disabling NX huge pages should succeed if process has reboot permissions");
+ } else {
+ TEST_ASSERT(r == -1 && errno == EPERM,
+ "This process should not have permission to disable NX huge pages");
+ return;
+ }
+ }
+
+ vcpu = vm_vcpu_add(vm, 0, guest_code);
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_HUGETLB,
+ HPAGE_GPA, HPAGE_SLOT,
+ HPAGE_SLOT_NPAGES, 0);
+
+ virt_map(vm, HPAGE_GVA, HPAGE_GPA, HPAGE_SLOT_NPAGES);
+
+ hva = addr_gpa2hva(vm, HPAGE_GPA);
+ memset(hva, RETURN_OPCODE, HPAGE_SLOT_NPAGES * PAGE_SIZE);
+
+ check_2m_page_count(vm, 0);
+ check_split_count(vm, 0);
+
+ /*
+ * The guest code will first read from the first hugepage, resulting
+ * in a huge page mapping being created.
+ */
+ vcpu_run(vcpu);
+ check_2m_page_count(vm, 1);
+ check_split_count(vm, 0);
+
+ /*
+ * Then the guest code will read from the second hugepage, resulting
+ * in another huge page mapping being created.
+ */
+ vcpu_run(vcpu);
+ check_2m_page_count(vm, 2);
+ check_split_count(vm, 0);
+
+ /*
+ * Next, the guest will execute from the first huge page, causing it
+ * to be remapped at 4k.
+ *
+ * If NX huge pages are disabled, this should have no effect.
+ */
+ vcpu_run(vcpu);
+ check_2m_page_count(vm, disable_nx_huge_pages ? 2 : 1);
+ check_split_count(vm, disable_nx_huge_pages ? 0 : 1);
+
+ /*
+	 * Executing from the third (previously unaccessed) huge page will
+	 * cause part of it to be mapped at 4k.
+ *
+ * If NX huge pages are disabled, it should be mapped at 2M.
+ */
+ vcpu_run(vcpu);
+ check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
+ check_split_count(vm, disable_nx_huge_pages ? 0 : 2);
+
+ /* Reading from the first huge page again should have no effect. */
+ vcpu_run(vcpu);
+ check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
+ check_split_count(vm, disable_nx_huge_pages ? 0 : 2);
+
+ /* Give recovery thread time to run. */
+ wait_for_reclaim(reclaim_period_ms);
+
+ /*
+ * Now that the reclaimer has run, all the split pages should be gone.
+ *
+	 * If NX huge pages are disabled, the reclaimer will not run, so
+ * nothing should change from here on.
+ */
+ check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 1);
+ check_split_count(vm, 0);
+
+ /*
+ * The 4k mapping on hpage 3 should have been removed, so check that
+ * reading from it causes a huge page mapping to be installed.
+ */
+ vcpu_run(vcpu);
+ check_2m_page_count(vm, disable_nx_huge_pages ? 3 : 2);
+ check_split_count(vm, 0);
+
+ kvm_vm_free(vm);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-p period_ms] [-t token]\n", name);
+ puts("");
+ printf(" -p: The NX reclaim period in miliseconds.\n");
+ printf(" -t: The magic token to indicate environment setup is done.\n");
+ printf(" -r: The test has reboot permissions and can disable NX huge pages.\n");
+ puts("");
+ exit(0);
+}
+
+int main(int argc, char **argv)
+{
+ int reclaim_period_ms = 0, token = 0, opt;
+ bool reboot_permissions = false;
+
+ while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
+ switch (opt) {
+ case 'p':
+ reclaim_period_ms = atoi(optarg);
+ break;
+ case 't':
+ token = atoi(optarg);
+ break;
+ case 'r':
+ reboot_permissions = true;
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ break;
+ }
+ }
+
+ if (token != MAGIC_TOKEN) {
+ print_skip("This test must be run with the magic token %d.\n"
+ "This is done by nx_huge_pages_test.sh, which\n"
+ "also handles environment setup for the test.",
+ MAGIC_TOKEN);
+ exit(KSFT_SKIP);
+ }
+
+ if (!reclaim_period_ms) {
+ print_skip("The NX reclaim period must be specified and non-zero");
+ exit(KSFT_SKIP);
+ }
+
+ run_test(reclaim_period_ms, false, reboot_permissions);
+ run_test(reclaim_period_ms, true, reboot_permissions);
+
+ return 0;
+}
+
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
new file mode 100755
index 000000000000..0560149e66ed
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Wrapper script which performs setup and cleanup for nx_huge_pages_test.
+# Makes use of root privileges to set up huge pages and KVM module parameters.
+#
+# tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
+# Copyright (C) 2022, Google LLC.
+
+set -e
+
+NX_HUGE_PAGES=$(cat /sys/module/kvm/parameters/nx_huge_pages)
+NX_HUGE_PAGES_RECOVERY_RATIO=$(cat /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio)
+NX_HUGE_PAGES_RECOVERY_PERIOD=$(cat /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms)
+HUGE_PAGES=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
+
+set +e
+
+function sudo_echo () {
+ echo "$1" | sudo tee -a "$2" > /dev/null
+}
+
+NXECUTABLE="$(dirname $0)/nx_huge_pages_test"
+
+sudo_echo test /dev/null || exit 4 # KSFT_SKIP=4
+
+(
+ set -e
+
+ sudo_echo 1 /sys/module/kvm/parameters/nx_huge_pages
+ sudo_echo 1 /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
+ sudo_echo 100 /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
+ sudo_echo "$(( $HUGE_PAGES + 3 ))" /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+
+ # Test with reboot permissions
+ if [ $(whoami) == "root" ] || sudo setcap cap_sys_boot+ep $NXECUTABLE 2> /dev/null; then
+ echo Running test with CAP_SYS_BOOT enabled
+ $NXECUTABLE -t 887563923 -p 100 -r
+ test $(whoami) == "root" || sudo setcap cap_sys_boot-ep $NXECUTABLE
+ else
+ echo setcap failed, skipping nx_huge_pages_test with CAP_SYS_BOOT enabled
+ fi
+
+ # Test without reboot permissions
+ if [ $(whoami) != "root" ] ; then
+ echo Running test with CAP_SYS_BOOT disabled
+ $NXECUTABLE -t 887563923 -p 100
+ else
+ echo Running as root, skipping nx_huge_pages_test with CAP_SYS_BOOT disabled
+ fi
+)
+RET=$?
+
+sudo_echo "$NX_HUGE_PAGES" /sys/module/kvm/parameters/nx_huge_pages
+sudo_echo "$NX_HUGE_PAGES_RECOVERY_RATIO" /sys/module/kvm/parameters/nx_huge_pages_recovery_ratio
+sudo_echo "$NX_HUGE_PAGES_RECOVERY_PERIOD" /sys/module/kvm/parameters/nx_huge_pages_recovery_period_ms
+sudo_echo "$HUGE_PAGES" /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+
+exit $RET
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 1e89688cbbbf..76417c7d687b 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -21,7 +21,6 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 0
#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
static void guest_code(void)
@@ -35,28 +34,18 @@ static void guest_code(void)
}
}
-static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
+static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu)
{
- struct kvm_enable_cap cap = {};
-
- cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
- cap.flags = 0;
- cap.args[0] = (int)enable;
- vm_enable_cap(vm, &cap);
-}
-
-static void test_msr_platform_info_enabled(struct kvm_vm *vm)
-{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
- set_msr_platform_info_enabled(vm, true);
- vcpu_run(vm, VCPU_ID);
+ vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- get_ucall(vm, VCPU_ID, &uc);
+ get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu\n", uc.cmd);
TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
@@ -65,12 +54,12 @@ static void test_msr_platform_info_enabled(struct kvm_vm *vm)
MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
}
-static void test_msr_platform_info_disabled(struct kvm_vm *vm)
+static void test_msr_platform_info_disabled(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
- set_msr_platform_info_enabled(vm, false);
- vcpu_run(vm, VCPU_ID);
+ vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
"Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
run->exit_reason,
@@ -79,27 +68,23 @@ static void test_msr_platform_info_disabled(struct kvm_vm *vm)
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- int rv;
uint64_t msr_platform_info;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
- if (!rv) {
- print_skip("KVM_CAP_MSR_PLATFORM_INFO not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_MSR_PLATFORM_INFO));
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
- vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
- msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
- test_msr_platform_info_enabled(vm);
- test_msr_platform_info_disabled(vm);
- vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
+ msr_platform_info = vcpu_get_msr(vcpu, MSR_PLATFORM_INFO);
+ vcpu_set_msr(vcpu, MSR_PLATFORM_INFO,
+ msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+ test_msr_platform_info_enabled(vcpu);
+ test_msr_platform_info_disabled(vcpu);
+ vcpu_set_msr(vcpu, MSR_PLATFORM_INFO, msr_platform_info);
kvm_vm_free(vm);
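
The platform_info test ORs MSR_PLATFORM_INFO_MAX_TURBO_RATIO (0xff00) into the MSR and checks that the guest reads the same field back, i.e. the ratio occupies bits 15:8. A quick stand-alone illustration of that masking (the sample MSR value below is made up for the example):

#include <stdint.h>
#include <stdio.h>

#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00

int main(void)
{
	/* Hypothetical MSR_PLATFORM_INFO value with the field forced on. */
	uint64_t msr = 0x1c00 | MSR_PLATFORM_INFO_MAX_TURBO_RATIO;

	/* The ratio field occupies bits 15:8, hence the 0xff00 mask. */
	printf("masked field: 0x%llx, ratio: %llu\n",
	       (unsigned long long)(msr & MSR_PLATFORM_INFO_MAX_TURBO_RATIO),
	       (unsigned long long)((msr >> 8) & 0xff));
	return 0;
}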
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 93d77574b255..ea4e259a1e2e 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -49,7 +49,6 @@ union cpuid10_ebx {
/* Oddly, this isn't in perf_event.h. */
#define ARCH_PERFMON_BRANCHES_RETIRED 5
-#define VCPU_ID 0
#define NUM_BRANCHES 42
/*
@@ -173,17 +172,17 @@ static void amd_guest_code(void)
* Run the VM to the next GUEST_SYNC(value), and return the value passed
* to the sync. Any other exit from the guest is fatal.
*/
-static uint64_t run_vm_to_sync(struct kvm_vm *vm)
+static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- get_ucall(vm, VCPU_ID, &uc);
+ get_ucall(vcpu, &uc);
TEST_ASSERT(uc.cmd == UCALL_SYNC,
"Received ucall other than UCALL_SYNC: %lu", uc.cmd);
return uc.args[1];
@@ -197,13 +196,13 @@ static uint64_t run_vm_to_sync(struct kvm_vm *vm)
* a sanity check and then GUEST_SYNC(success). In the case of failure,
* the behavior of the guest on resumption is undefined.
*/
-static bool sanity_check_pmu(struct kvm_vm *vm)
+static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
{
bool success;
- vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
- success = run_vm_to_sync(vm);
- vm_install_exception_handler(vm, GP_VECTOR, NULL);
+ vm_install_exception_handler(vcpu->vm, GP_VECTOR, guest_gp_handler);
+ success = run_vcpu_to_sync(vcpu);
+ vm_install_exception_handler(vcpu->vm, GP_VECTOR, NULL);
return success;
}
@@ -264,9 +263,9 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
return f;
}
-static void test_without_filter(struct kvm_vm *vm)
+static void test_without_filter(struct kvm_vcpu *vcpu)
{
- uint64_t count = run_vm_to_sync(vm);
+ uint64_t count = run_vcpu_to_sync(vcpu);
if (count != NUM_BRANCHES)
pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
@@ -274,21 +273,21 @@ static void test_without_filter(struct kvm_vm *vm)
TEST_ASSERT(count, "Allowed PMU event is not counting");
}
-static uint64_t test_with_filter(struct kvm_vm *vm,
+static uint64_t test_with_filter(struct kvm_vcpu *vcpu,
struct kvm_pmu_event_filter *f)
{
- vm_ioctl(vm, KVM_SET_PMU_EVENT_FILTER, (void *)f);
- return run_vm_to_sync(vm);
+ vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
+ return run_vcpu_to_sync(vcpu);
}
-static void test_amd_deny_list(struct kvm_vm *vm)
+static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
uint64_t event = EVENT(0x1C2, 0);
struct kvm_pmu_event_filter *f;
uint64_t count;
f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY);
- count = test_with_filter(vm, f);
+ count = test_with_filter(vcpu, f);
free(f);
if (count != NUM_BRANCHES)
@@ -297,10 +296,10 @@ static void test_amd_deny_list(struct kvm_vm *vm)
TEST_ASSERT(count, "Allowed PMU event is not counting");
}
-static void test_member_deny_list(struct kvm_vm *vm)
+static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
- uint64_t count = test_with_filter(vm, f);
+ uint64_t count = test_with_filter(vcpu, f);
free(f);
if (count)
@@ -309,10 +308,10 @@ static void test_member_deny_list(struct kvm_vm *vm)
TEST_ASSERT(!count, "Disallowed PMU Event is counting");
}
-static void test_member_allow_list(struct kvm_vm *vm)
+static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
- uint64_t count = test_with_filter(vm, f);
+ uint64_t count = test_with_filter(vcpu, f);
free(f);
if (count != NUM_BRANCHES)
@@ -321,14 +320,14 @@ static void test_member_allow_list(struct kvm_vm *vm)
TEST_ASSERT(count, "Allowed PMU event is not counting");
}
-static void test_not_member_deny_list(struct kvm_vm *vm)
+static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
uint64_t count;
remove_event(f, INTEL_BR_RETIRED);
remove_event(f, AMD_ZEN_BR_RETIRED);
- count = test_with_filter(vm, f);
+ count = test_with_filter(vcpu, f);
free(f);
if (count != NUM_BRANCHES)
pr_info("%s: Branch instructions retired = %lu (expected %u)\n",
@@ -336,14 +335,14 @@ static void test_not_member_deny_list(struct kvm_vm *vm)
TEST_ASSERT(count, "Allowed PMU event is not counting");
}
-static void test_not_member_allow_list(struct kvm_vm *vm)
+static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
uint64_t count;
remove_event(f, INTEL_BR_RETIRED);
remove_event(f, AMD_ZEN_BR_RETIRED);
- count = test_with_filter(vm, f);
+ count = test_with_filter(vcpu, f);
free(f);
if (count)
pr_info("%s: Branch instructions retired = %lu (expected 0)\n",
@@ -358,25 +357,23 @@ static void test_not_member_allow_list(struct kvm_vm *vm)
*/
static void test_pmu_config_disable(void (*guest_code)(void))
{
+ struct kvm_vcpu *vcpu;
int r;
struct kvm_vm *vm;
- struct kvm_enable_cap cap = { 0 };
r = kvm_check_cap(KVM_CAP_PMU_CAPABILITY);
if (!(r & KVM_PMU_CAP_DISABLE))
return;
- vm = vm_create_without_vcpus(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES);
+ vm = vm_create(1);
- cap.cap = KVM_CAP_PMU_CAPABILITY;
- cap.args[0] = KVM_PMU_CAP_DISABLE;
- TEST_ASSERT(!vm_enable_cap(vm, &cap), "Failed to set KVM_PMU_CAP_DISABLE.");
+ vm_enable_cap(vm, KVM_CAP_PMU_CAPABILITY, KVM_PMU_CAP_DISABLE);
- vm_vcpu_add_default(vm, VCPU_ID, guest_code);
+ vcpu = vm_vcpu_add(vm, 0, guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
- TEST_ASSERT(!sanity_check_pmu(vm),
+ TEST_ASSERT(!sanity_check_pmu(vcpu),
"Guest should not be able to use disabled PMU.");
kvm_vm_free(vm);
@@ -387,7 +384,7 @@ static void test_pmu_config_disable(void (*guest_code)(void))
* counter per logical processor, an EBX bit vector of length greater
* than 5, and EBX[5] clear.
*/
-static bool check_intel_pmu_leaf(struct kvm_cpuid_entry2 *entry)
+static bool check_intel_pmu_leaf(const struct kvm_cpuid_entry2 *entry)
{
union cpuid10_eax eax = { .full = entry->eax };
union cpuid10_ebx ebx = { .full = entry->ebx };
@@ -403,10 +400,10 @@ static bool check_intel_pmu_leaf(struct kvm_cpuid_entry2 *entry)
*/
static bool use_intel_pmu(void)
{
- struct kvm_cpuid_entry2 *entry;
+ const struct kvm_cpuid_entry2 *entry;
- entry = kvm_get_supported_cpuid_index(0xa, 0);
- return is_intel_cpu() && entry && check_intel_pmu_leaf(entry);
+ entry = kvm_get_supported_cpuid_entry(0xa);
+ return is_intel_cpu() && check_intel_pmu_leaf(entry);
}
static bool is_zen1(uint32_t eax)
@@ -435,10 +432,10 @@ static bool is_zen3(uint32_t eax)
*/
static bool use_amd_pmu(void)
{
- struct kvm_cpuid_entry2 *entry;
+ const struct kvm_cpuid_entry2 *entry;
- entry = kvm_get_supported_cpuid_index(1, 0);
- return is_amd_cpu() && entry &&
+ entry = kvm_get_supported_cpuid_entry(1);
+ return is_amd_cpu() &&
(is_zen1(entry->eax) ||
is_zen2(entry->eax) ||
is_zen3(entry->eax));
@@ -446,47 +443,33 @@ static bool use_amd_pmu(void)
int main(int argc, char *argv[])
{
- void (*guest_code)(void) = NULL;
+ void (*guest_code)(void);
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- int r;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- r = kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER);
- if (!r) {
- print_skip("KVM_CAP_PMU_EVENT_FILTER not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER));
- if (use_intel_pmu())
- guest_code = intel_guest_code;
- else if (use_amd_pmu())
- guest_code = amd_guest_code;
+ TEST_REQUIRE(use_intel_pmu() || use_amd_pmu());
+ guest_code = use_intel_pmu() ? intel_guest_code : amd_guest_code;
- if (!guest_code) {
- print_skip("Don't know how to test this guest PMU");
- exit(KSFT_SKIP);
- }
-
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
- if (!sanity_check_pmu(vm)) {
- print_skip("Guest PMU is not functional");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(sanity_check_pmu(vcpu));
if (use_amd_pmu())
- test_amd_deny_list(vm);
+ test_amd_deny_list(vcpu);
- test_without_filter(vm);
- test_member_deny_list(vm);
- test_member_allow_list(vm);
- test_not_member_deny_list(vm);
- test_not_member_allow_list(vm);
+ test_without_filter(vcpu);
+ test_member_deny_list(vcpu);
+ test_member_allow_list(vcpu);
+ test_not_member_deny_list(vcpu);
+ test_not_member_allow_list(vcpu);
kvm_vm_free(vm);
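
The skip logic above follows the recurring pattern of this series: TEST_REQUIRE() exits with KSFT_SKIP on its own, replacing the open-coded kvm_check_cap()/print_skip()/exit() triple. A hedged sketch (hypothetical helper name):

#include "kvm_util.h"
#include "test_util.h"

/* Illustrative helper, not from the patch: gate a test on a KVM cap. */
static void require_pmu_event_filter(void)
{
	/*
	 * Replaces:
	 *   if (!kvm_check_cap(KVM_CAP_PMU_EVENT_FILTER)) {
	 *           print_skip("KVM_CAP_PMU_EVENT_FILTER not supported");
	 *           exit(KSFT_SKIP);
	 *   }
	 */
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_PMU_EVENT_FILTER));
}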
diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
index ae76436af0cc..b25d7556b638 100644
--- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
+++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
@@ -16,10 +16,6 @@
#include "processor.h"
#include "apic.h"
-#define N_VCPU 2
-#define VCPU_ID0 0
-#define VCPU_ID1 1
-
static void guest_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);
@@ -38,31 +34,30 @@ static void guest_not_bsp_vcpu(void *arg)
GUEST_DONE();
}
-static void test_set_boot_busy(struct kvm_vm *vm)
+static void test_set_bsp_busy(struct kvm_vcpu *vcpu, const char *msg)
{
- int res;
+ int r = __vm_ioctl(vcpu->vm, KVM_SET_BOOT_CPU_ID,
+ (void *)(unsigned long)vcpu->id);
- res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID0);
- TEST_ASSERT(res == -1 && errno == EBUSY,
- "KVM_SET_BOOT_CPU_ID set while running vm");
+ TEST_ASSERT(r == -1 && errno == EBUSY, "KVM_SET_BOOT_CPU_ID set %s", msg);
}
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+static void run_vcpu(struct kvm_vcpu *vcpu)
{
struct ucall uc;
int stage;
for (stage = 0; stage < 2; stage++) {
- vcpu_run(vm, vcpuid);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpuid, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1,
"Stage %d: Unexpected register values vmexit, got %lx",
stage + 1, (ulong)uc.args[1]);
- test_set_boot_busy(vm);
+ test_set_bsp_busy(vcpu, "while running vm");
break;
case UCALL_DONE:
TEST_ASSERT(stage == 1,
@@ -70,91 +65,67 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
stage);
break;
case UCALL_ABORT:
- TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx",
- (const char *)uc.args[0], __FILE__,
- uc.args[1], uc.args[2], uc.args[3]);
+ REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
default:
TEST_ASSERT(false, "Unexpected exit: %s",
- exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+ exit_reason_str(vcpu->run->exit_reason));
}
}
}
-static struct kvm_vm *create_vm(void)
+static struct kvm_vm *create_vm(uint32_t nr_vcpus, uint32_t bsp_vcpu_id,
+ struct kvm_vcpu *vcpus[])
{
struct kvm_vm *vm;
- uint64_t vcpu_pages = (DEFAULT_STACK_PGS) * 2;
- uint64_t extra_pg_pages = vcpu_pages / PTES_PER_MIN_PAGE * N_VCPU;
- uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
+ uint32_t i;
- pages = vm_adjust_num_guest_pages(VM_MODE_DEFAULT, pages);
- vm = vm_create(VM_MODE_DEFAULT, pages, O_RDWR);
+ vm = vm_create(nr_vcpus);
- kvm_vm_elf_load(vm, program_invocation_name);
- vm_create_irqchip(vm);
+ vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_vcpu_id);
+ for (i = 0; i < nr_vcpus; i++)
+ vcpus[i] = vm_vcpu_add(vm, i, i == bsp_vcpu_id ? guest_bsp_vcpu :
+ guest_not_bsp_vcpu);
return vm;
}
-static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
-{
- if (bsp_code)
- vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu);
- else
- vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu);
-}
-
-static void run_vm_bsp(uint32_t bsp_vcpu)
+static void run_vm_bsp(uint32_t bsp_vcpu_id)
{
+ struct kvm_vcpu *vcpus[2];
struct kvm_vm *vm;
- bool is_bsp_vcpu1 = bsp_vcpu == VCPU_ID1;
- vm = create_vm();
+ vm = create_vm(ARRAY_SIZE(vcpus), bsp_vcpu_id, vcpus);
- if (is_bsp_vcpu1)
- vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1);
-
- add_x86_vcpu(vm, VCPU_ID0, !is_bsp_vcpu1);
- add_x86_vcpu(vm, VCPU_ID1, is_bsp_vcpu1);
-
- run_vcpu(vm, VCPU_ID0);
- run_vcpu(vm, VCPU_ID1);
+ run_vcpu(vcpus[0]);
+ run_vcpu(vcpus[1]);
kvm_vm_free(vm);
}
static void check_set_bsp_busy(void)
{
+ struct kvm_vcpu *vcpus[2];
struct kvm_vm *vm;
- int res;
- vm = create_vm();
+ vm = create_vm(ARRAY_SIZE(vcpus), 0, vcpus);
- add_x86_vcpu(vm, VCPU_ID0, true);
- add_x86_vcpu(vm, VCPU_ID1, false);
+ test_set_bsp_busy(vcpus[1], "after adding vcpu");
- res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1);
- TEST_ASSERT(res == -1 && errno == EBUSY, "KVM_SET_BOOT_CPU_ID set after adding vcpu");
+ run_vcpu(vcpus[0]);
+ run_vcpu(vcpus[1]);
- run_vcpu(vm, VCPU_ID0);
- run_vcpu(vm, VCPU_ID1);
-
- res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1);
- TEST_ASSERT(res == -1 && errno == EBUSY, "KVM_SET_BOOT_CPU_ID set to a terminated vcpu");
+ test_set_bsp_busy(vcpus[1], "to a terminated vcpu");
kvm_vm_free(vm);
}
int main(int argc, char *argv[])
{
- if (!kvm_check_cap(KVM_CAP_SET_BOOT_CPU_ID)) {
- print_skip("set_boot_cpu_id not available");
- return 0;
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SET_BOOT_CPU_ID));
- run_vm_bsp(VCPU_ID0);
- run_vm_bsp(VCPU_ID1);
- run_vm_bsp(VCPU_ID0);
+ run_vm_bsp(0);
+ run_vm_bsp(1);
+ run_vm_bsp(0);
check_set_bsp_busy();
}
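
Note the ordering the rewritten create_vm() enforces: KVM_SET_BOOT_CPU_ID is issued before any vCPU exists, since KVM rejects it with EBUSY afterwards (which is exactly what test_set_bsp_busy() verifies). A minimal sketch under that assumption (helper name is illustrative; NULL guest code for brevity, the real test passes per-vCPU entry points):

#include "kvm_util.h"

/* Illustrative helper, not from the patch: pick the BSP, then add vCPUs. */
static struct kvm_vm *create_vm_with_bsp(uint32_t bsp_id,
					 struct kvm_vcpu *vcpus[2])
{
	struct kvm_vm *vm = vm_create(2);

	/* Must precede vm_vcpu_add(); later calls fail with EBUSY. */
	vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *)(unsigned long)bsp_id);

	vcpus[0] = vm_vcpu_add(vm, 0, NULL);
	vcpus[1] = vm_vcpu_add(vm, 1, NULL);
	return vm;
}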
diff --git a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 318be0bf77ab..2bb08bf2125d 100644
--- a/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -22,9 +22,7 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 5
-
-static void test_cr4_feature_bit(struct kvm_vm *vm, struct kvm_sregs *orig,
+static void test_cr4_feature_bit(struct kvm_vcpu *vcpu, struct kvm_sregs *orig,
uint64_t feature_bit)
{
struct kvm_sregs sregs;
@@ -37,44 +35,40 @@ static void test_cr4_feature_bit(struct kvm_vm *vm, struct kvm_sregs *orig,
memcpy(&sregs, orig, sizeof(sregs));
sregs.cr4 |= feature_bit;
- rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(rc, "KVM allowed unsupported CR4 bit (0x%lx)", feature_bit);
/* Sanity check that KVM didn't change anything. */
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(!memcmp(&sregs, orig, sizeof(sregs)), "KVM modified sregs");
}
-static uint64_t calc_cr4_feature_bits(struct kvm_vm *vm)
+static uint64_t calc_supported_cr4_feature_bits(void)
{
- struct kvm_cpuid_entry2 *cpuid_1, *cpuid_7;
uint64_t cr4;
- cpuid_1 = kvm_get_supported_cpuid_entry(1);
- cpuid_7 = kvm_get_supported_cpuid_entry(7);
-
cr4 = X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE |
X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE | X86_CR4_PGE |
X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT;
- if (cpuid_7->ecx & CPUID_UMIP)
+ if (kvm_cpu_has(X86_FEATURE_UMIP))
cr4 |= X86_CR4_UMIP;
- if (cpuid_7->ecx & CPUID_LA57)
+ if (kvm_cpu_has(X86_FEATURE_LA57))
cr4 |= X86_CR4_LA57;
- if (cpuid_1->ecx & CPUID_VMX)
+ if (kvm_cpu_has(X86_FEATURE_VMX))
cr4 |= X86_CR4_VMXE;
- if (cpuid_1->ecx & CPUID_SMX)
+ if (kvm_cpu_has(X86_FEATURE_SMX))
cr4 |= X86_CR4_SMXE;
- if (cpuid_7->ebx & CPUID_FSGSBASE)
+ if (kvm_cpu_has(X86_FEATURE_FSGSBASE))
cr4 |= X86_CR4_FSGSBASE;
- if (cpuid_1->ecx & CPUID_PCID)
+ if (kvm_cpu_has(X86_FEATURE_PCID))
cr4 |= X86_CR4_PCIDE;
- if (cpuid_1->ecx & CPUID_XSAVE)
+ if (kvm_cpu_has(X86_FEATURE_XSAVE))
cr4 |= X86_CR4_OSXSAVE;
- if (cpuid_7->ebx & CPUID_SMEP)
+ if (kvm_cpu_has(X86_FEATURE_SMEP))
cr4 |= X86_CR4_SMEP;
- if (cpuid_7->ebx & CPUID_SMAP)
+ if (kvm_cpu_has(X86_FEATURE_SMAP))
cr4 |= X86_CR4_SMAP;
- if (cpuid_7->ecx & CPUID_PKU)
+ if (kvm_cpu_has(X86_FEATURE_PKU))
cr4 |= X86_CR4_PKE;
return cr4;
@@ -83,6 +77,7 @@ static uint64_t calc_cr4_feature_bits(struct kvm_vm *vm)
int main(int argc, char *argv[])
{
struct kvm_sregs sregs;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t cr4;
int rc;
@@ -95,44 +90,44 @@ int main(int argc, char *argv[])
* use it to verify all supported CR4 bits can be set prior to defining
* the vCPU model, i.e. without doing KVM_SET_CPUID2.
*/
- vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
+ vm = vm_create_barebones();
+ vcpu = __vm_vcpu_add(vm, 0);
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
- sregs.cr4 |= calc_cr4_feature_bits(vm);
+ sregs.cr4 |= calc_supported_cr4_feature_bits();
cr4 = sregs.cr4;
- rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Failed to set supported CR4 bits (0x%lx)", cr4);
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
TEST_ASSERT(sregs.cr4 == cr4, "sregs.CR4 (0x%llx) != CR4 (0x%lx)",
sregs.cr4, cr4);
/* Verify all unsupported features are rejected by KVM. */
- test_cr4_feature_bit(vm, &sregs, X86_CR4_UMIP);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_LA57);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_VMXE);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_SMXE);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_FSGSBASE);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_PCIDE);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_OSXSAVE);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_SMEP);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_SMAP);
- test_cr4_feature_bit(vm, &sregs, X86_CR4_PKE);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_UMIP);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_LA57);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_VMXE);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMXE);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_FSGSBASE);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_PCIDE);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_OSXSAVE);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMEP);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_SMAP);
+ test_cr4_feature_bit(vcpu, &sregs, X86_CR4_PKE);
kvm_vm_free(vm);
/* Create a "real" VM and verify APIC_BASE can be set. */
- vm = vm_create_default(VCPU_ID, 0, NULL);
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.apic_base = 1 << 10;
- rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(rc, "Set IA32_APIC_BASE to %llx (invalid)",
sregs.apic_base);
sregs.apic_base = 1 << 11;
- rc = _vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ rc = _vcpu_sregs_set(vcpu, &sregs);
TEST_ASSERT(!rc, "Couldn't set IA32_APIC_BASE to %llx (valid)",
sregs.apic_base);
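
The CR4 rework above is another instance of the series' kvm_cpu_has() conversion: a single X86_FEATURE_* lookup replaces fetching CPUID leaves and masking bits by hand. One hedged example:

#include "processor.h"

/* Illustrative only: feature-gate one CR4 bit via kvm_cpu_has(). */
static uint64_t cr4_pke_bit(void)
{
	/*
	 * Replaces:
	 *   cpuid_7 = kvm_get_supported_cpuid_entry(7);
	 *   if (cpuid_7->ecx & CPUID_PKU)
	 *           cr4 |= X86_CR4_PKE;
	 */
	return kvm_cpu_has(X86_FEATURE_PKU) ? X86_CR4_PKE : 0;
}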
diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
index d1dc1acf997c..c7ef97561038 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
@@ -12,7 +12,6 @@
#include "processor.h"
#include "svm_util.h"
#include "kselftest.h"
-#include "../lib/kvm_util_internal.h"
#define SEV_POLICY_ES 0b100
@@ -54,10 +53,10 @@ static struct kvm_vm *sev_vm_create(bool es)
struct kvm_sev_launch_start start = { 0 };
int i;
- vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ vm = vm_create_barebones();
sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
- vm_vcpu_add(vm, i);
+ __vm_vcpu_add(vm, i);
if (es)
start.policy |= SEV_POLICY_ES;
sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
@@ -71,32 +70,27 @@ static struct kvm_vm *aux_vm_create(bool with_vcpus)
struct kvm_vm *vm;
int i;
- vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ vm = vm_create_barebones();
if (!with_vcpus)
return vm;
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
- vm_vcpu_add(vm, i);
+ __vm_vcpu_add(vm, i);
return vm;
}
-static int __sev_migrate_from(int dst_fd, int src_fd)
+static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
- .args = { src_fd }
- };
-
- return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+ return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM, src->fd);
}
-static void sev_migrate_from(int dst_fd, int src_fd)
+static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
int ret;
- ret = __sev_migrate_from(dst_fd, src_fd);
+ ret = __sev_migrate_from(dst, src);
TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno);
}
@@ -111,13 +105,13 @@ static void test_sev_migrate_from(bool es)
dst_vms[i] = aux_vm_create(true);
/* Initial migration from the src to the first dst. */
- sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
+ sev_migrate_from(dst_vms[0], src_vm);
for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
- sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
+ sev_migrate_from(dst_vms[i], dst_vms[i - 1]);
/* Migrate the guest back to the original VM. */
- ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+ ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]);
TEST_ASSERT(ret == -1 && errno == EIO,
"VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
errno);
@@ -129,7 +123,7 @@ static void test_sev_migrate_from(bool es)
struct locking_thread_input {
struct kvm_vm *vm;
- int source_fds[NR_LOCK_TESTING_THREADS];
+ struct kvm_vm *source_vms[NR_LOCK_TESTING_THREADS];
};
static void *locking_test_thread(void *arg)
@@ -139,7 +133,7 @@ static void *locking_test_thread(void *arg)
for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
j = i % NR_LOCK_TESTING_THREADS;
- __sev_migrate_from(input->vm->fd, input->source_fds[j]);
+ __sev_migrate_from(input->vm, input->source_vms[j]);
}
return NULL;
@@ -153,11 +147,11 @@ static void test_sev_migrate_locking(void)
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
input[i].vm = sev_vm_create(/* es= */ false);
- input[0].source_fds[i] = input[i].vm->fd;
+ input[0].source_vms[i] = input[i].vm;
}
for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
- memcpy(input[i].source_fds, input[0].source_fds,
- sizeof(input[i].source_fds));
+ memcpy(input[i].source_vms, input[0].source_vms,
+ sizeof(input[i].source_vms));
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
@@ -174,9 +168,9 @@ static void test_sev_migrate_parameters(void)
*sev_es_vm_no_vmsa;
int ret;
- vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ vm_no_vcpu = vm_create_barebones();
vm_no_sev = aux_vm_create(true);
- ret = __sev_migrate_from(vm_no_vcpu->fd, vm_no_sev->fd);
+ ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Migrations require SEV enabled. ret %d, errno: %d\n", ret,
errno);
@@ -186,29 +180,29 @@ static void test_sev_migrate_parameters(void)
sev_vm = sev_vm_create(/* es= */ false);
sev_es_vm = sev_vm_create(/* es= */ true);
- sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ sev_es_vm_no_vmsa = vm_create_barebones();
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
- vm_vcpu_add(sev_es_vm_no_vmsa, 1);
+ __vm_vcpu_add(sev_es_vm_no_vmsa, 1);
- ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
+ ret = __sev_migrate_from(sev_vm, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
ret, errno);
- ret = __sev_migrate_from(sev_es_vm->fd, sev_vm->fd);
+ ret = __sev_migrate_from(sev_es_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
ret, errno);
- ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm->fd);
+ ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
ret, errno);
- ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm_no_vmsa->fd);
+ ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
@@ -222,22 +216,17 @@ out:
kvm_vm_free(vm_no_sev);
}
-static int __sev_mirror_create(int dst_fd, int src_fd)
+static int __sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
- .args = { src_fd }
- };
-
- return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+ return __vm_enable_cap(dst, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, src->fd);
}
-static void sev_mirror_create(int dst_fd, int src_fd)
+static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
int ret;
- ret = __sev_mirror_create(dst_fd, src_fd);
+ ret = __sev_mirror_create(dst, src);
TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
}
@@ -285,11 +274,11 @@ static void test_sev_mirror(bool es)
src_vm = sev_vm_create(es);
dst_vm = aux_vm_create(false);
- sev_mirror_create(dst_vm->fd, src_vm->fd);
+ sev_mirror_create(dst_vm, src_vm);
/* Check that we can complete creation of the mirror VM. */
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
- vm_vcpu_add(dst_vm, i);
+ __vm_vcpu_add(dst_vm, i);
if (es)
sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
@@ -309,18 +298,18 @@ static void test_sev_mirror_parameters(void)
vm_with_vcpu = aux_vm_create(true);
vm_no_vcpu = aux_vm_create(false);
- ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
+ ret = __sev_mirror_create(sev_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able copy context to self. ret: %d, errno: %d\n",
ret, errno);
- ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
+ ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
errno);
- ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
+ ret = __sev_mirror_create(vm_with_vcpu, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
@@ -330,13 +319,13 @@ static void test_sev_mirror_parameters(void)
goto out;
sev_es_vm = sev_vm_create(/* es= */ true);
- ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
+ ret = __sev_mirror_create(sev_vm, sev_es_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
ret, errno);
- ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
+ ret = __sev_mirror_create(sev_es_vm, sev_vm);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
@@ -364,16 +353,16 @@ static void test_sev_move_copy(void)
dst2_mirror_vm = aux_vm_create(false);
dst3_mirror_vm = aux_vm_create(false);
- sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+ sev_mirror_create(mirror_vm, sev_vm);
- sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
- sev_migrate_from(dst_vm->fd, sev_vm->fd);
+ sev_migrate_from(dst_mirror_vm, mirror_vm);
+ sev_migrate_from(dst_vm, sev_vm);
- sev_migrate_from(dst2_vm->fd, dst_vm->fd);
- sev_migrate_from(dst2_mirror_vm->fd, dst_mirror_vm->fd);
+ sev_migrate_from(dst2_vm, dst_vm);
+ sev_migrate_from(dst2_mirror_vm, dst_mirror_vm);
- sev_migrate_from(dst3_mirror_vm->fd, dst2_mirror_vm->fd);
- sev_migrate_from(dst3_vm->fd, dst2_vm->fd);
+ sev_migrate_from(dst3_mirror_vm, dst2_mirror_vm);
+ sev_migrate_from(dst3_vm, dst2_vm);
kvm_vm_free(dst_vm);
kvm_vm_free(sev_vm);
@@ -393,10 +382,10 @@ static void test_sev_move_copy(void)
mirror_vm = aux_vm_create(false);
dst_mirror_vm = aux_vm_create(false);
- sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+ sev_mirror_create(mirror_vm, sev_vm);
- sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
- sev_migrate_from(dst_vm->fd, sev_vm->fd);
+ sev_migrate_from(dst_mirror_vm, mirror_vm);
+ sev_migrate_from(dst_vm, sev_vm);
kvm_vm_free(mirror_vm);
kvm_vm_free(dst_mirror_vm);
@@ -404,41 +393,25 @@ static void test_sev_move_copy(void)
kvm_vm_free(sev_vm);
}
-#define X86_FEATURE_SEV (1 << 1)
-#define X86_FEATURE_SEV_ES (1 << 3)
-
int main(int argc, char *argv[])
{
- struct kvm_cpuid_entry2 *cpuid;
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));
- if (!kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM) &&
- !kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
- print_skip("Capabilities not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));
- cpuid = kvm_get_supported_cpuid_entry(0x80000000);
- if (cpuid->eax < 0x8000001f) {
- print_skip("AMD memory encryption not available");
- exit(KSFT_SKIP);
- }
- cpuid = kvm_get_supported_cpuid_entry(0x8000001f);
- if (!(cpuid->eax & X86_FEATURE_SEV)) {
- print_skip("AMD SEV not available");
- exit(KSFT_SKIP);
- }
- have_sev_es = !!(cpuid->eax & X86_FEATURE_SEV_ES);
+ have_sev_es = kvm_cpu_has(X86_FEATURE_SEV_ES);
- if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
+ if (kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
test_sev_migrate_from(/* es= */ false);
if (have_sev_es)
test_sev_migrate_from(/* es= */ true);
test_sev_migrate_locking();
test_sev_migrate_parameters();
- if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
+ if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
test_sev_move_copy();
}
- if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
+ if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
test_sev_mirror(/* es= */ false);
if (have_sev_es)
test_sev_mirror(/* es= */ true);
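
The migration helpers above now take struct kvm_vm pointers and route through __vm_enable_cap(), which returns the raw ioctl result so callers can assert on errno. A minimal sketch (hypothetical name):

#include "kvm_util.h"

/* Illustrative only: attempt an encrypted-context move, preserve errno. */
static int try_move_enc_context(struct kvm_vm *dst, struct kvm_vm *src)
{
	/* Double-underscore variant: no assert, caller checks ret/errno. */
	return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
			       src->fd);
}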
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index b4e0c860769e..1f136a81858e 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -19,8 +19,6 @@
#include "vmx.h"
#include "svm_util.h"
-#define VCPU_ID 1
-
#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
@@ -85,7 +83,7 @@ static void guest_code(void *arg)
sync_with_host(4);
if (arg) {
- if (cpu_has_svm()) {
+ if (this_cpu_has(X86_FEATURE_SVM)) {
generic_svm_setup(svm, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
} else {
@@ -101,7 +99,7 @@ static void guest_code(void *arg)
sync_with_host(7);
- if (cpu_has_svm()) {
+ if (this_cpu_has(X86_FEATURE_SVM)) {
run_guest(svm->vmcb, svm->vmcb_gpa);
run_guest(svm->vmcb, svm->vmcb_gpa);
} else {
@@ -116,22 +114,23 @@ static void guest_code(void *arg)
sync_with_host(DONE);
}
-void inject_smi(struct kvm_vm *vm)
+void inject_smi(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_events events;
- vcpu_events_get(vm, VCPU_ID, &events);
+ vcpu_events_get(vcpu, &events);
events.smi.pending = 1;
events.flags |= KVM_VCPUEVENT_VALID_SMM;
- vcpu_events_set(vm, VCPU_ID, &events);
+ vcpu_events_set(vcpu, &events);
}
int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0;
+ struct kvm_vcpu *vcpu;
struct kvm_regs regs;
struct kvm_vm *vm;
struct kvm_run *run;
@@ -139,9 +138,9 @@ int main(int argc, char *argv[])
int stage, stage_reported;
/* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ run = vcpu->run;
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
SMRAM_MEMSLOT, SMRAM_PAGES, 0);
@@ -152,29 +151,29 @@ int main(int argc, char *argv[])
memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
sizeof(smi_handler));
- vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
+ vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);
- if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- if (nested_svm_supported())
+ if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
+ if (kvm_cpu_has(X86_FEATURE_SVM))
vcpu_alloc_svm(vm, &nested_gva);
- else if (nested_vmx_supported())
+ else if (kvm_cpu_has(X86_FEATURE_VMX))
vcpu_alloc_vmx(vm, &nested_gva);
}
if (!nested_gva)
pr_info("will skip SMM test with VMX enabled\n");
- vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
memset(&regs, 0, sizeof(regs));
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
stage_reported = regs.rax & 0xff;
@@ -191,7 +190,7 @@ int main(int argc, char *argv[])
* return from it. Do not perform save/restore while in SMM yet.
*/
if (stage == 8) {
- inject_smi(vm);
+ inject_smi(vcpu);
continue;
}
@@ -200,15 +199,14 @@ int main(int argc, char *argv[])
* during L2 execution.
*/
if (stage == 10)
- inject_smi(vm);
+ inject_smi(vcpu);
- state = vcpu_save_state(vm, VCPU_ID);
+ state = vcpu_save_state(vcpu);
kvm_vm_release(vm);
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
+
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_load_state(vcpu, state);
+ run = vcpu->run;
kvm_x86_state_cleanup(state);
}
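
The save/restore loop above collapses four calls (kvm_vm_restart(), vm_vcpu_add(), vcpu_set_cpuid(), vcpu_load_state()) into vm_recreate_with_one_vcpu(). A hedged sketch of the round-trip (helper name is illustrative; the CPUID note reflects my reading of the new helper, not the patch text):

#include "kvm_util.h"
#include "processor.h"

/* Illustrative only: snapshot vCPU 0, recreate the VM, restore state. */
static struct kvm_vcpu *save_restore_vcpu(struct kvm_vm *vm,
					  struct kvm_vcpu *vcpu)
{
	struct kvm_x86_state *state = vcpu_save_state(vcpu);

	kvm_vm_release(vm);

	/* Re-adds vCPU 0; the default CPUID is set up internally. */
	vcpu = vm_recreate_with_one_vcpu(vm);
	vcpu_load_state(vcpu, state);
	kvm_x86_state_cleanup(state);
	return vcpu;
}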
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 2e0a92da8ff5..ea578971fb9f 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -20,7 +20,6 @@
#include "vmx.h"
#include "svm_util.h"
-#define VCPU_ID 5
#define L2_GUEST_STACK_SIZE 256
void svm_l2_guest_code(void)
@@ -143,7 +142,7 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
GUEST_SYNC(2);
if (arg) {
- if (cpu_has_svm())
+ if (this_cpu_has(X86_FEATURE_SVM))
svm_l1_guest_code(arg);
else
vmx_l1_guest_code(arg);
@@ -157,6 +156,7 @@ int main(int argc, char *argv[])
vm_vaddr_t nested_gva = 0;
struct kvm_regs regs1, regs2;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_x86_state *state;
@@ -164,34 +164,33 @@ int main(int argc, char *argv[])
int stage;
/* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ vcpu_regs_get(vcpu, &regs1);
- if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- if (nested_svm_supported())
+ if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
+ if (kvm_cpu_has(X86_FEATURE_SVM))
vcpu_alloc_svm(vm, &nested_gva);
- else if (nested_vmx_supported())
+ else if (kvm_cpu_has(X86_FEATURE_VMX))
vcpu_alloc_vmx(vm, &nested_gva);
}
if (!nested_gva)
pr_info("will skip nested state checks\n");
- vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
+ vcpu_args_set(vcpu, 1, nested_gva);
for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
@@ -206,22 +205,20 @@ int main(int argc, char *argv[])
uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
stage, (ulong)uc.args[1]);
- state = vcpu_save_state(vm, VCPU_ID);
+ state = vcpu_save_state(vcpu);
memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ vcpu_regs_get(vcpu, &regs1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_load_state(vcpu, state);
+ run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
+ vcpu_regs_get(vcpu, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
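
state_test's loop shows the post-rework ucall dispatch shape shared by most of these tests; REPORT_GUEST_ASSERT() replaces the hand-rolled TEST_FAIL() formatting of guest assert payloads. A hedged sketch (hypothetical helper name):

#include "kvm_util.h"

/* Illustrative only: the common ucall dispatch after the rework. */
static void expect_sync_or_done(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		/* NOT REACHED */
	default:
		TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
	}
}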
diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
index 30a81038df46..4a07ba227b99 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
@@ -13,10 +13,6 @@
#include "svm_util.h"
#include "apic.h"
-#define VCPU_ID 0
-
-static struct kvm_vm *vm;
-
bool vintr_irq_called;
bool intr_irq_called;
@@ -88,33 +84,36 @@ static void l1_guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
vm_vaddr_t svm_gva;
+ struct kvm_vm *vm;
+ struct ucall uc;
- nested_svm_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler);
vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler);
vcpu_alloc_svm(vm, &svm_gva);
- vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
+ vcpu_args_set(vcpu, 1, svm_gva);
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct ucall uc;
+ run = vcpu->run;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
break;
/* NOT REACHED */
case UCALL_DONE:
diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
new file mode 100644
index 000000000000..e637d7736012
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Oracle and/or its affiliates.
+ *
+ * Based on:
+ * svm_int_ctl_test
+ *
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ */
+
+#include <stdatomic.h>
+#include <stdio.h>
+#include <unistd.h>
+#include "apic.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+#include "test_util.h"
+
+#define INT_NR 0x20
+
+static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");
+
+static unsigned int bp_fired;
+static void guest_bp_handler(struct ex_regs *regs)
+{
+ bp_fired++;
+}
+
+static unsigned int int_fired;
+static void l2_guest_code_int(void);
+
+static void guest_int_handler(struct ex_regs *regs)
+{
+ int_fired++;
+ GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int,
+ regs->rip, (unsigned long)l2_guest_code_int);
+}
+
+static void l2_guest_code_int(void)
+{
+ GUEST_ASSERT_1(int_fired == 1, int_fired);
+ vmmcall();
+ ud2();
+
+ GUEST_ASSERT_1(bp_fired == 1, bp_fired);
+ hlt();
+}
+
+static atomic_int nmi_stage;
+#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
+#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)
+static void guest_nmi_handler(struct ex_regs *regs)
+{
+ nmi_stage_inc();
+
+ if (nmi_stage_get() == 1) {
+ vmmcall();
+ GUEST_ASSERT(false);
+ } else {
+ GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get());
+ GUEST_DONE();
+ }
+}
+
+static void l2_guest_code_nmi(void)
+{
+ ud2();
+}
+
+static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
+{
+ #define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ struct vmcb *vmcb = svm->vmcb;
+
+ if (is_nmi)
+ x2apic_enable();
+
+ /* Prepare for L2 execution. */
+ generic_svm_setup(svm,
+ is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
+ vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);
+
+ if (is_nmi) {
+ vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+ } else {
+ vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
+ /* The return address pushed on stack */
+ vmcb->control.next_rip = vmcb->save.rip;
+ }
+
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
+ vmcb->control.exit_code,
+ vmcb->control.exit_info_1, vmcb->control.exit_info_2);
+
+ if (is_nmi) {
+ clgi();
+ x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);
+
+ GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get());
+ nmi_stage_inc();
+
+ stgi();
+ /* self-NMI happens here */
+ while (true)
+ cpu_relax();
+ }
+
+ /* Skip over VMMCALL */
+ vmcb->save.rip += 3;
+
+ /* Switch to alternate IDT to cause intervening NPF again */
+ vmcb->save.idtr.base = idt_alt;
+ vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */
+
+ vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
+ /* The return address pushed on stack, skip over UD2 */
+ vmcb->control.next_rip = vmcb->save.rip + 2;
+
+ run_guest(vmcb, svm->vmcb_gpa);
+ GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT,
+ vmcb->control.exit_code,
+ vmcb->control.exit_info_1, vmcb->control.exit_info_2);
+
+ GUEST_DONE();
+}
+
+static void run_test(bool is_nmi)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ vm_vaddr_t svm_gva;
+ vm_vaddr_t idt_alt_vm;
+ struct kvm_guest_debug debug;
+
+ pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vcpu);
+
+ vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
+ vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
+ vm_install_exception_handler(vm, INT_NR, guest_int_handler);
+
+ vcpu_alloc_svm(vm, &svm_gva);
+
+ if (!is_nmi) {
+ void *idt, *idt_alt;
+
+ idt_alt_vm = vm_vaddr_alloc_page(vm);
+ idt_alt = addr_gva2hva(vm, idt_alt_vm);
+ idt = addr_gva2hva(vm, vm->idt);
+ memcpy(idt_alt, idt, getpagesize());
+ } else {
+ idt_alt_vm = 0;
+ }
+ vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);
+
+ memset(&debug, 0, sizeof(debug));
+ vcpu_guest_debug_set(vcpu, &debug);
+
+ struct kvm_run *run = vcpu->run;
+ struct ucall uc;
+
+ alarm(2);
+ vcpu_run(vcpu);
+ alarm(0);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT_3(uc, "vals = 0x%lx 0x%lx 0x%lx");
+ break;
+ /* NOT REACHED */
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
+ }
+done:
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
+
+ TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
+ "KVM with nSVM is supposed to unconditionally advertise nRIP Save");
+
+ atomic_init(&nmi_stage, 0);
+
+ run_test(false);
+ run_test(true);
+
+ return 0;
+}
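
The soft-interrupt leg of the new test relies on nRIP Save: for SVM_EVTINJ_TYPE_SOFT, next_rip supplies the return address the CPU pushes for the injected INT. A minimal sketch of just that setup, using the vmcb fields from the test above (function name is illustrative):

#include "svm_util.h"

/* Illustrative only: inject a soft interrupt into L2 with an explicit
 * return address; meaningful only when X86_FEATURE_NRIPS is present.
 */
static void inject_soft_int(struct vmcb *vmcb, uint8_t vector)
{
	vmcb->control.event_inj = vector | SVM_EVTINJ_VALID |
				  SVM_EVTINJ_TYPE_SOFT;
	/* Address pushed on the guest stack as the IRET return point. */
	vmcb->control.next_rip = vmcb->save.rip;
}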
diff --git a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
index be2ca157485b..c3ac45df7483 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
@@ -12,10 +12,6 @@
#include "processor.h"
#include "svm_util.h"
-#define VCPU_ID 5
-
-static struct kvm_vm *vm;
-
static void l2_guest_code(struct svm_test_data *svm)
{
__asm__ __volatile__("vmcall");
@@ -39,28 +35,30 @@ static void l1_guest_code(struct svm_test_data *svm)
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
vm_vaddr_t svm_gva;
+ struct kvm_vm *vm;
- nested_svm_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vcpu_alloc_svm(vm, &svm_gva);
- vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
+ vcpu_args_set(vcpu, 1, svm_gva);
for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index fc03a150278d..9b6db0b0b13e 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -20,8 +20,6 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 5
-
#define UCALL_PIO_PORT ((uint16_t)0x1000)
struct ucall uc_none = {
@@ -84,6 +82,7 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_regs regs;
@@ -95,66 +94,59 @@ int main(int argc, char *argv[])
setbuf(stdout, NULL);
cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
- if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
- print_skip("KVM_CAP_SYNC_REGS not supported");
- exit(KSFT_SKIP);
- }
- if ((cap & INVALID_SYNC_FIELD) != 0) {
- print_skip("The \"invalid\" field is not invalid");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
+ TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ run = vcpu->run;
/* Request reading invalid register set from VCPU. */
run->kvm_valid_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+ run->kvm_valid_regs = 0;
run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+ run->kvm_valid_regs = 0;
/* Request setting invalid register set into VCPU. */
run->kvm_dirty_regs = INVALID_SYNC_FIELD;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+ run->kvm_dirty_regs = 0;
run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(rv < 0 && errno == EINVAL,
"Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
rv);
- vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+ run->kvm_dirty_regs = 0;
/* Request and verify all valid register sets. */
/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
run->kvm_valid_regs = TEST_SYNC_FIELDS;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
compare_regs(&regs, &run->s.regs.regs);
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs.sregs);
- vcpu_events_get(vm, VCPU_ID, &events);
+ vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
/* Set and verify various register values. */
@@ -164,7 +156,7 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
@@ -176,13 +168,13 @@ int main(int argc, char *argv[])
"apic_base sync regs value incorrect 0x%llx.",
run->s.regs.sregs.apic_base);
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
compare_regs(&regs, &run->s.regs.regs);
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
compare_sregs(&sregs, &run->s.regs.sregs);
- vcpu_events_get(vm, VCPU_ID, &events);
+ vcpu_events_get(vcpu, &events);
compare_vcpu_events(&events, &run->s.regs.events);
/* Clear kvm_dirty_regs bits, verify new s.regs values are
@@ -191,7 +183,7 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = TEST_SYNC_FIELDS;
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xDEADBEEF;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
@@ -208,8 +200,8 @@ int main(int argc, char *argv[])
run->kvm_dirty_regs = 0;
run->s.regs.regs.rbx = 0xAAAA;
regs.rbx = 0xBAC0;
- vcpu_regs_set(vm, VCPU_ID, &regs);
- rv = _vcpu_run(vm, VCPU_ID);
+ vcpu_regs_set(vcpu, &regs);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
@@ -217,7 +209,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);
@@ -229,7 +221,7 @@ int main(int argc, char *argv[])
run->kvm_valid_regs = 0;
run->kvm_dirty_regs = TEST_SYNC_FIELDS;
run->s.regs.regs.rbx = 0xBBBB;
- rv = _vcpu_run(vm, VCPU_ID);
+ rv = _vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
@@ -237,7 +229,7 @@ int main(int argc, char *argv[])
TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
"rbx sync regs value incorrect 0x%llx.",
run->s.regs.regs.rbx);
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
TEST_ASSERT(regs.rbx == 0xBBBB + 1,
"rbx guest value incorrect 0x%llx.",
regs.rbx);
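
The rbx round-trips above exercise the sync_regs contract: values placed in run->s.regs with the matching kvm_dirty_regs bit set are consumed by KVM on the next KVM_RUN, with no KVM_SET_REGS ioctl needed. A hedged sketch (hypothetical helper, simplified to the REGS set only):

#include "kvm_util.h"

/* Illustrative only: push RBX to the guest via the sync_regs area. */
static void set_rbx_via_sync_regs(struct kvm_vcpu *vcpu, uint64_t val)
{
	struct kvm_run *run = vcpu->run;

	run->kvm_valid_regs = KVM_SYNC_X86_REGS;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
	run->s.regs.regs.rbx = val;
	vcpu_run(vcpu);	/* KVM applies s.regs before entering the guest */
}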
diff --git a/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
new file mode 100644
index 000000000000..70b44f0b52fe
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kselftest.h"
+
+#define ARBITRARY_IO_PORT 0x2000
+
+/* The virtual machine object. */
+static struct kvm_vm *vm;
+
+static void l2_guest_code(void)
+{
+ asm volatile("inb %%dx, %%al"
+ : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
+}
+
+void l1_guest_code(struct vmx_pages *vmx)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ GUEST_ASSERT(vmx->vmcs_gpa);
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx));
+ GUEST_ASSERT(load_vmcs(vmx));
+
+ prepare_vmcs(vmx, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ GUEST_ASSERT(!vmlaunch());
+ /* L2 should triple fault after a triple fault event is injected. */
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
+ GUEST_DONE();
+}
+
+int main(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ struct kvm_vcpu_events events;
+ vm_vaddr_t vmx_pages_gva;
+ struct ucall uc;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_TRIPLE_FAULT_EVENT));
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+ vm_enable_cap(vm, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 1);
+
+ run = vcpu->run;
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ vcpu_run(vcpu);
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Expected KVM_EXIT_IO, got: %u (%s)\n",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+ TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
+ "Expected IN from port %d from L2, got port %d",
+ ARBITRARY_IO_PORT, run->io.port);
+ vcpu_events_get(vcpu, &events);
+ events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
+ events.triple_fault.pending = true;
+ vcpu_events_set(vcpu, &events);
+ run->immediate_exit = true;
+ vcpu_run_complete_io(vcpu);
+
+ vcpu_events_get(vcpu, &events);
+ TEST_ASSERT(events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT,
+ "Triple fault event invalid");
+ TEST_ASSERT(events.triple_fault.pending,
+ "No triple fault pending");
+ vcpu_run(vcpu);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+
+}
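
The sequence above is the crux of the new test: complete the pending IN emulation with immediate_exit set, queue a triple fault from userspace, then let the vCPU run so L1 observes EXIT_REASON_TRIPLE_FAULT. The injection step in isolation (hypothetical helper name):

#include "kvm_util.h"

/* Illustrative only: mark a triple fault pending from userspace. */
static void queue_triple_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);
	events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
	events.triple_fault.pending = true;
	vcpu_events_set(vcpu, &events);
}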
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
index a426078b16a3..22d366c697f7 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
@@ -9,14 +9,12 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 0
-
#define UNITY (1ull << 30)
#define HOST_ADJUST (UNITY * 64)
#define GUEST_STEP (UNITY * 4)
#define ROUND(x) ((x + UNITY / 2) & -UNITY)
#define rounded_rdmsr(x) ROUND(rdmsr(x))
-#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vm, 0, x))
+#define rounded_host_rdmsr(x) ROUND(vcpu_get_msr(vcpu, x))
static void guest_code(void)
{
@@ -66,15 +64,13 @@ static void guest_code(void)
GUEST_DONE();
}
-static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
+static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
struct ucall uc;
- vcpu_args_set(vm, vcpuid, 1, vcpuid);
-
- vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, vcpuid, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
uc.args[1] == stage + 1, "Stage %d: Unexpected register values vmexit, got %lx",
@@ -83,34 +79,33 @@ static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid, int stage)
case UCALL_DONE:
return;
case UCALL_ABORT:
- TEST_ASSERT(false, "%s at %s:%ld\n" \
- "\tvalues: %#lx, %#lx", (const char *)uc.args[0],
- __FILE__, uc.args[1], uc.args[2], uc.args[3]);
+ REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
default:
TEST_ASSERT(false, "Unexpected exit: %s",
- exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+ exit_reason_str(vcpu->run->exit_reason));
}
}
int main(void)
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
uint64_t val;
- vm = vm_create_default(VCPU_ID, 0, guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
val = 0;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC affect both MSRs. */
- run_vcpu(vm, VCPU_ID, 1);
+ run_vcpu(vcpu, 1);
val = 1ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
- run_vcpu(vm, VCPU_ID, 2);
+ run_vcpu(vcpu, 2);
val = 2ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
@@ -119,18 +114,18 @@ int main(void)
* Host: writes to MSR_IA32_TSC set the host-side offset
* and therefore do not change MSR_IA32_TSC_ADJUST.
*/
- vcpu_set_msr(vm, 0, MSR_IA32_TSC, HOST_ADJUST + val);
+ vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
- run_vcpu(vm, VCPU_ID, 3);
+ run_vcpu(vcpu, 3);
/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
- vcpu_set_msr(vm, 0, MSR_IA32_TSC_ADJUST, UNITY * 123456);
+ vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
- ASSERT_EQ(vcpu_get_msr(vm, 0, MSR_IA32_TSC_ADJUST), UNITY * 123456);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
/* Restore previous value. */
- vcpu_set_msr(vm, 0, MSR_IA32_TSC_ADJUST, val);
+ vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
@@ -138,7 +133,7 @@ int main(void)
* Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
* host-side offset and affect both MSRs.
*/
- run_vcpu(vm, VCPU_ID, 4);
+ run_vcpu(vcpu, 4);
val = 3ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
@@ -147,7 +142,7 @@ int main(void)
* Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
* offset is now visible in MSR_IA32_TSC_ADJUST.
*/
- run_vcpu(vm, VCPU_ID, 5);
+ run_vcpu(vcpu, 5);
val = 4ull * GUEST_STEP;
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
new file mode 100644
index 000000000000..47139aab7408
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * tsc_scaling_sync
+ *
+ * Copyright © 2021 Amazon.com, Inc. or its affiliates.
+ *
+ * Test that vCPU TSCs stay synchronized when vCPUs are added in parallel
+ * while VM-wide TSC scaling (KVM_SET_TSC_KHZ) is in effect
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#include <stdint.h>
+#include <time.h>
+#include <sched.h>
+#include <signal.h>
+#include <pthread.h>
+
+#define NR_TEST_VCPUS 20
+
+static struct kvm_vm *vm;
+pthread_spinlock_t create_lock;
+
+#define TEST_TSC_KHZ 2345678UL
+#define TEST_TSC_OFFSET 200000000
+
+uint64_t tsc_sync;
+static void guest_code(void)
+{
+ uint64_t start_tsc, local_tsc, tmp;
+
+ start_tsc = rdtsc();
+ do {
+ tmp = READ_ONCE(tsc_sync);
+ local_tsc = rdtsc();
+ WRITE_ONCE(tsc_sync, local_tsc);
+ if (unlikely(local_tsc < tmp))
+ GUEST_SYNC_ARGS(0, local_tsc, tmp, 0, 0);
+
+ } while (local_tsc - start_tsc < 5000 * TEST_TSC_KHZ);
+
+ GUEST_DONE();
+}
+
+
+static void *run_vcpu(void *_cpu_nr)
+{
+ unsigned long vcpu_id = (unsigned long)_cpu_nr;
+ unsigned long failures = 0;
+ static bool first_cpu_done;
+ struct kvm_vcpu *vcpu;
+
+ /* The kernel is fine, but vm_vcpu_add() needs locking */
+ pthread_spin_lock(&create_lock);
+
+ vcpu = vm_vcpu_add(vm, vcpu_id, guest_code);
+
+ if (!first_cpu_done) {
+ first_cpu_done = true;
+ vcpu_set_msr(vcpu, MSR_IA32_TSC, TEST_TSC_OFFSET);
+ }
+
+ pthread_spin_unlock(&create_lock);
+
+ for (;;) {
+ volatile struct kvm_run *run = vcpu->run;
+ struct ucall uc;
+
+ vcpu_run(vcpu);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_DONE:
+ goto out;
+
+ case UCALL_SYNC:
+ printf("Guest %d sync %lx %lx %ld\n", vcpu->id,
+ uc.args[2], uc.args[3], uc.args[2] - uc.args[3]);
+ failures++;
+ break;
+
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+ out:
+ return (void *)failures;
+}
+
+int main(int argc, char *argv[])
+{
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_TSC_CONTROL));
+
+ vm = vm_create(NR_TEST_VCPUS);
+ vm_ioctl(vm, KVM_SET_TSC_KHZ, (void *) TEST_TSC_KHZ);
+
+ pthread_spin_init(&create_lock, PTHREAD_PROCESS_PRIVATE);
+ pthread_t cpu_threads[NR_TEST_VCPUS];
+ unsigned long cpu;
+ for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++)
+ pthread_create(&cpu_threads[cpu], NULL, run_vcpu, (void *)cpu);
+
+ unsigned long failures = 0;
+ for (cpu = 0; cpu < NR_TEST_VCPUS; cpu++) {
+ void *this_cpu_failures;
+ pthread_join(cpu_threads[cpu], &this_cpu_failures);
+ failures += (unsigned long)this_cpu_failures;
+ }
+
+ TEST_ASSERT(!failures, "TSC sync failed");
+ pthread_spin_destroy(&create_lock);
+ kvm_vm_free(vm);
+ return 0;
+}
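
The guest loop in tsc_scaling_sync is a lock-free monotonicity check across vCPUs: each vCPU publishes its latest TSC reading through the shared tsc_sync word, and if any vCPU later reads its own TSC as older than a value already published, the TSCs cannot be synchronized. A host-side analogue of the same idea, sketched with pthreads and the __rdtsc() builtin (the thread count, duration, and use of volatile in place of READ_ONCE/WRITE_ONCE are arbitrary choices for the sketch):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>	/* __rdtsc() */

static volatile uint64_t tsc_sync;

static void *checker(void *arg)
{
	uint64_t start = __rdtsc(), now, seen;
	unsigned long failures = 0;

	(void)arg;
	do {
		seen = tsc_sync;	/* value some thread published earlier */
		now = __rdtsc();	/* our own reading, taken strictly later */
		tsc_sync = now;		/* publish it for the other threads */
		if (now < seen)		/* later read, smaller value: out of sync */
			failures++;
	} while (now - start < 1000000000ull);

	return (void *)failures;
}

int main(void)
{
	pthread_t threads[4];
	unsigned long total = 0;
	void *ret;

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, checker, NULL);
	for (int i = 0; i < 4; i++) {
		pthread_join(threads[i], &ret);
		total += (unsigned long)ret;
	}
	printf("monotonicity violations: %lu\n", total);
	return 0;
}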
diff --git a/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
new file mode 100644
index 000000000000..a897c7fd8abe
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ucna_injection_test
+ *
+ * Copyright (C) 2022, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Test that user space can inject UnCorrectable No Action required (UCNA)
+ * memory errors into the guest.
+ *
+ * The test starts one vCPU with MCG_CMCI_P enabled. It verifies that
+ * proper UCNA errors can be injected to a vCPU with both MCG_CMCI_P and
+ * the corresponding per-bank control register (MCI_CTL2) bit enabled.
+ * The test also checks that the UCNA errors get recorded in the Machine
+ * Check bank registers regardless of whether the error signaling
+ * interrupts are delivered to the guest.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <pthread.h>
+#include <inttypes.h>
+#include <string.h>
+#include <time.h>
+
+#include "kvm_util_base.h"
+#include "kvm_util.h"
+#include "mce.h"
+#include "processor.h"
+#include "test_util.h"
+#include "apic.h"
+
+#define SYNC_FIRST_UCNA 9
+#define SYNC_SECOND_UCNA 10
+#define SYNC_GP 11
+#define FIRST_UCNA_ADDR 0xdeadbeef
+#define SECOND_UCNA_ADDR 0xcafeb0ba
+
+/*
+ * Vector for the CMCI interrupt.
+ * Value is arbitrary. Any value in 0x20-0xFF should work:
+ * https://wiki.osdev.org/Interrupt_Vector_Table
+ */
+#define CMCI_VECTOR 0xa9
+
+#define UCNA_BANK 0x7 // IMC0 bank
+
+#define MCI_CTL2_RESERVED_BIT BIT_ULL(29)
+
+static uint64_t supported_mcg_caps;
+
+/*
+ * Record state about the injected UCNAs.
+ * Variables with the 'i_' prefix are recorded in the interrupt handler;
+ * variables without it are recorded in the guest's main execution thread.
+ */
+static volatile uint64_t i_ucna_rcvd;
+static volatile uint64_t i_ucna_addr;
+static volatile uint64_t ucna_addr;
+static volatile uint64_t ucna_addr2;
+
+struct thread_params {
+ struct kvm_vcpu *vcpu;
+ uint64_t *p_i_ucna_rcvd;
+ uint64_t *p_i_ucna_addr;
+ uint64_t *p_ucna_addr;
+ uint64_t *p_ucna_addr2;
+};
+
+static void verify_apic_base_addr(void)
+{
+ uint64_t msr = rdmsr(MSR_IA32_APICBASE);
+ uint64_t base = GET_APIC_BASE(msr);
+
+ GUEST_ASSERT(base == APIC_DEFAULT_GPA);
+}
+
+static void ucna_injection_guest_code(void)
+{
+ uint64_t ctl2;
+ verify_apic_base_addr();
+ xapic_enable();
+
+ /* Sets up the interrupt vector and enables per-bank CMCI signaling. */
+ xapic_write_reg(APIC_LVTCMCI, CMCI_VECTOR | APIC_DM_FIXED);
+ ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
+ wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN);
+
+ /* Enables interrupt in guest. */
+ asm volatile("sti");
+
+ /* Let user space inject the first UCNA */
+ GUEST_SYNC(SYNC_FIRST_UCNA);
+
+ ucna_addr = rdmsr(MSR_IA32_MCx_ADDR(UCNA_BANK));
+
+ /* Disables the per-bank CMCI signaling. */
+ ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
+ wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 & ~MCI_CTL2_CMCI_EN);
+
+ /* Let the user space inject the second UCNA */
+ GUEST_SYNC(SYNC_SECOND_UCNA);
+
+ ucna_addr2 = rdmsr(MSR_IA32_MCx_ADDR(UCNA_BANK));
+ GUEST_DONE();
+}
+
+static void cmci_disabled_guest_code(void)
+{
+ uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
+ wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN);
+
+ GUEST_DONE();
+}
+
+static void cmci_enabled_guest_code(void)
+{
+ uint64_t ctl2 = rdmsr(MSR_IA32_MCx_CTL2(UCNA_BANK));
+ wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_RESERVED_BIT);
+
+ GUEST_DONE();
+}
+
+static void guest_cmci_handler(struct ex_regs *regs)
+{
+ i_ucna_rcvd++;
+ i_ucna_addr = rdmsr(MSR_IA32_MCx_ADDR(UCNA_BANK));
+ xapic_write_reg(APIC_EOI, 0);
+}
+
+static void guest_gp_handler(struct ex_regs *regs)
+{
+ GUEST_SYNC(SYNC_GP);
+}
+
+static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu)
+{
+ unsigned int exit_reason;
+ struct ucall uc;
+
+ vcpu_run(vcpu);
+
+ exit_reason = vcpu->run->exit_reason;
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
+ exit_reason, exit_reason_str(exit_reason));
+ TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC,
+ "Expect UCALL_SYNC\n");
+ TEST_ASSERT(uc.args[1] == SYNC_GP, "#GP is expected.");
+ printf("vCPU received GP in guest.\n");
+}
+
+static void inject_ucna(struct kvm_vcpu *vcpu, uint64_t addr)
+{
+ /*
+ * A UCNA error is indicated with VAL=1, UC=1, PCC=0, S=0 and AR=0 in
+ * the IA32_MCi_STATUS register.
+ * MSCOD=1 (BIT[16] - MscodDataRdErr).
+ * MCACOD=0x0090 (Memory controller error format, channel 0)
+ */
+ uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
+ MCI_STATUS_MISCV | MCI_STATUS_ADDRV | 0x10090;
+ struct kvm_x86_mce mce = {};
+ mce.status = status;
+ mce.mcg_status = 0;
+ /*
+ * MCM_ADDR_PHYS indicates the reported address is a physical address.
+ * The lowest 6 bits are the recoverable address LSB, i.e., the injected MCE
+ * is at 4KB granularity.
+ */
+ mce.misc = (MCM_ADDR_PHYS << 6) | 0xc;
+ mce.addr = addr;
+ mce.bank = UCNA_BANK;
+
+ vcpu_ioctl(vcpu, KVM_X86_SET_MCE, &mce);
+}
+
+static void *run_ucna_injection(void *arg)
+{
+ struct thread_params *params = (struct thread_params *)arg;
+ struct ucall uc;
+ int old;
+ int r;
+ unsigned int exit_reason;
+
+ r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
+ TEST_ASSERT(r == 0,
+ "pthread_setcanceltype failed with errno=%d",
+ r);
+
+ vcpu_run(params->vcpu);
+
+ exit_reason = params->vcpu->run->exit_reason;
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "unexpected exit reason %u-%s, expected KVM_EXIT_IO",
+ exit_reason, exit_reason_str(exit_reason));
+ TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
+ "Expect UCALL_SYNC\n");
+ TEST_ASSERT(uc.args[1] == SYNC_FIRST_UCNA, "Expected sync request to inject the first UCNA.");
+
+ printf("Injecting first UCNA at %#x.\n", FIRST_UCNA_ADDR);
+
+ inject_ucna(params->vcpu, FIRST_UCNA_ADDR);
+ vcpu_run(params->vcpu);
+
+ exit_reason = params->vcpu->run->exit_reason;
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "unexpected exit reason %u-%s, expected KVM_EXIT_IO",
+ exit_reason, exit_reason_str(exit_reason));
+ TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC,
+ "Expect UCALL_SYNC\n");
+ TEST_ASSERT(uc.args[1] == SYNC_SECOND_UCNA, "Expected sync request to inject the second UCNA.");
+
+ printf("Injecting second UCNA at %#x.\n", SECOND_UCNA_ADDR);
+
+ inject_ucna(params->vcpu, SECOND_UCNA_ADDR);
+ vcpu_run(params->vcpu);
+
+ exit_reason = params->vcpu->run->exit_reason;
+ TEST_ASSERT(exit_reason == KVM_EXIT_IO,
+ "unexpected exit reason %u-%s, expected KVM_EXIT_IO",
+ exit_reason, exit_reason_str(exit_reason));
+ if (get_ucall(params->vcpu, &uc) == UCALL_ABORT) {
+ TEST_ASSERT(false, "vCPU assertion failure: %s.\n",
+ (const char *)uc.args[0]);
+ }
+
+ return NULL;
+}
+
+static void test_ucna_injection(struct kvm_vcpu *vcpu, struct thread_params *params)
+{
+ struct kvm_vm *vm = vcpu->vm;
+ params->vcpu = vcpu;
+ params->p_i_ucna_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_rcvd);
+ params->p_i_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&i_ucna_addr);
+ params->p_ucna_addr = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr);
+ params->p_ucna_addr2 = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ucna_addr2);
+
+ run_ucna_injection(params);
+
+ TEST_ASSERT(*params->p_i_ucna_rcvd == 1, "Only the first UCNA should be signaled.");
+ TEST_ASSERT(*params->p_i_ucna_addr == FIRST_UCNA_ADDR,
+ "Only the first UCNA's reported address should be recorded via the interrupt.");
+ TEST_ASSERT(*params->p_ucna_addr == FIRST_UCNA_ADDR,
+ "The first injected UCNA should be exposed via the bank registers.");
+ TEST_ASSERT(*params->p_ucna_addr2 == SECOND_UCNA_ADDR,
+ "The second injected UCNA should be exposed via the bank registers.");
+
+ printf("Test successful.\n"
+ "UCNA CMCI interrupts received: %ld\n"
+ "Last UCNA address received via CMCI: %lx\n"
+ "First UCNA address in vCPU thread: %lx\n"
+ "Second UCNA address in vCPU thread: %lx\n",
+ *params->p_i_ucna_rcvd, *params->p_i_ucna_addr,
+ *params->p_ucna_addr, *params->p_ucna_addr2);
+}
+
+static void setup_mce_cap(struct kvm_vcpu *vcpu, bool enable_cmci_p)
+{
+ uint64_t mcg_caps = MCG_CTL_P | MCG_SER_P | MCG_LMCE_P | KVM_MAX_MCE_BANKS;
+ if (enable_cmci_p)
+ mcg_caps |= MCG_CMCI_P;
+
+ mcg_caps &= supported_mcg_caps | MCG_CAP_BANKS_MASK;
+ vcpu_ioctl(vcpu, KVM_X86_SETUP_MCE, &mcg_caps);
+}
+
+static struct kvm_vcpu *create_vcpu_with_mce_cap(struct kvm_vm *vm, uint32_t vcpuid,
+ bool enable_cmci_p, void *guest_code)
+{
+ struct kvm_vcpu *vcpu = vm_vcpu_add(vm, vcpuid, guest_code);
+ setup_mce_cap(vcpu, enable_cmci_p);
+ return vcpu;
+}
+
+int main(int argc, char *argv[])
+{
+ struct thread_params params;
+ struct kvm_vm *vm;
+ struct kvm_vcpu *ucna_vcpu;
+ struct kvm_vcpu *cmcidis_vcpu;
+ struct kvm_vcpu *cmci_vcpu;
+
+ kvm_check_cap(KVM_CAP_MCE);
+
+ vm = __vm_create(VM_MODE_DEFAULT, 3, 0);
+
+ kvm_ioctl(vm->kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED,
+ &supported_mcg_caps);
+
+ if (!(supported_mcg_caps & MCG_CMCI_P)) {
+ print_skip("MCG_CMCI_P is not supported");
+ exit(KSFT_SKIP);
+ }
+
+ ucna_vcpu = create_vcpu_with_mce_cap(vm, 0, true, ucna_injection_guest_code);
+ cmcidis_vcpu = create_vcpu_with_mce_cap(vm, 1, false, cmci_disabled_guest_code);
+ cmci_vcpu = create_vcpu_with_mce_cap(vm, 2, true, cmci_enabled_guest_code);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(ucna_vcpu);
+ vcpu_init_descriptor_tables(cmcidis_vcpu);
+ vcpu_init_descriptor_tables(cmci_vcpu);
+ vm_install_exception_handler(vm, CMCI_VECTOR, guest_cmci_handler);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+
+ virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
+
+ test_ucna_injection(ucna_vcpu, &params);
+ run_vcpu_expect_gp(cmcidis_vcpu);
+ run_vcpu_expect_gp(cmci_vcpu);
+
+ kvm_vm_free(vm);
+}
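
The status word assembled by inject_ucna() is worth unpacking: 0x10090 combines MSCOD=1 (bit 16, MscodDataRdErr) with MCACOD=0x0090 (memory controller error, channel 0), and the VAL/UC/EN/MISCV/ADDRV flags mark the record as a valid, enabled, uncorrected error with valid MISC and ADDR registers. A standalone sketch of the same encoding; the MCI_STATUS_* bit positions are assumed to mirror the selftest's mce.h and the architectural IA32_MCi_STATUS layout:

#include <stdint.h>
#include <stdio.h>

/* Bit positions per the IA32_MCi_STATUS layout (assumed, mirroring mce.h). */
#define MCI_STATUS_VAL		(1ULL << 63)	/* record is valid */
#define MCI_STATUS_UC		(1ULL << 61)	/* uncorrected error */
#define MCI_STATUS_EN		(1ULL << 60)	/* error signaling enabled */
#define MCI_STATUS_MISCV	(1ULL << 59)	/* MISC register valid */
#define MCI_STATUS_ADDRV	(1ULL << 58)	/* ADDR register valid */

int main(void)
{
	/* MSCOD=1 (bit 16) | MCACOD=0x0090, as in the test's inject_ucna(). */
	uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
			  MCI_STATUS_MISCV | MCI_STATUS_ADDRV | 0x10090;

	/* UCNA: VAL=1, UC=1, PCC=0, S=0, AR=0 -- uncorrected, no action required. */
	printf("IA32_MCi_STATUS = %#llx\n", (unsigned long long)status);
	return 0;
}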
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
index e4bef2e05686..7316521428f8 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
@@ -10,8 +10,6 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 1
-
static void guest_ins_port80(uint8_t *buffer, unsigned int count)
{
unsigned long end;
@@ -52,31 +50,29 @@ static void guest_code(void)
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
struct kvm_regs regs;
struct kvm_run *run;
struct kvm_vm *vm;
struct ucall uc;
- int rc;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
memset(&regs, 0, sizeof(regs));
while (1) {
- rc = _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- if (get_ucall(vm, VCPU_ID, &uc))
+ if (get_ucall(vcpu, &uc))
break;
TEST_ASSERT(run->io.port == 0x80,
@@ -89,22 +85,20 @@ int main(int argc, char *argv[])
* scope from a testing perspective as it's not ABI in any way,
* i.e. it really is abusing internal KVM knowledge.
*/
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ vcpu_regs_get(vcpu, &regs);
if (regs.rcx == 2)
regs.rcx = 1;
if (regs.rcx == 3)
regs.rcx = 8192;
memset((void *)run + run->io.data_offset, 0xaa, 4096);
- vcpu_regs_set(vm, VCPU_ID, &regs);
+ vcpu_regs_set(vcpu, &regs);
}
switch (uc.cmd) {
case UCALL_DONE:
break;
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx",
- (const char *)uc.args[0], __FILE__, uc.args[1],
- uc.args[2], uc.args[3]);
+ REPORT_GUEST_ASSERT_2(uc, "argN+1 = 0x%lx, argN+2 = 0x%lx");
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
index e3e20e8848d0..a4f06370a245 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
@@ -17,7 +17,6 @@
#define KVM_FEP_LENGTH 5
static int fep_available = 1;
-#define VCPU_ID 1
#define MSR_NON_EXISTENT 0x474f4f00
static u64 deny_bits = 0;
@@ -395,31 +394,21 @@ static void guest_ud_handler(struct ex_regs *regs)
regs->rip += KVM_FEP_LENGTH;
}
-static void run_guest(struct kvm_vm *vm)
+static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
- int rc;
-
- rc = _vcpu_run(vm, VCPU_ID);
- TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
-}
-
-static void check_for_guest_assert(struct kvm_vm *vm)
-{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct ucall uc;
- if (run->exit_reason == KVM_EXIT_IO &&
- get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ if (vcpu->run->exit_reason == KVM_EXIT_IO &&
+ get_ucall(vcpu, &uc) == UCALL_ABORT) {
+ REPORT_GUEST_ASSERT(uc);
}
}
-static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
+static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
- check_for_guest_assert(vm);
+ check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
"Unexpected exit reason: %u (%s),\n",
@@ -450,11 +439,11 @@ static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
}
}
-static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
+static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
- check_for_guest_assert(vm);
+ check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
"Unexpected exit reason: %u (%s),\n",
@@ -481,43 +470,43 @@ static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
}
}
-static void process_ucall_done(struct kvm_vm *vm)
+static void process_ucall_done(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
struct ucall uc;
- check_for_guest_assert(vm);
+ check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)",
run->exit_reason,
exit_reason_str(run->exit_reason));
- TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
+ TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
"Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
uc.cmd, UCALL_DONE);
}
-static uint64_t process_ucall(struct kvm_vm *vm)
+static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
struct ucall uc = {};
- check_for_guest_assert(vm);
+ check_for_guest_assert(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s)",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_SYNC:
break;
case UCALL_ABORT:
- check_for_guest_assert(vm);
+ check_for_guest_assert(vcpu);
break;
case UCALL_DONE:
- process_ucall_done(vm);
+ process_ucall_done(vcpu);
break;
default:
TEST_ASSERT(false, "Unexpected ucall");
@@ -526,45 +515,43 @@ static uint64_t process_ucall(struct kvm_vm *vm)
return uc.cmd;
}
-static void run_guest_then_process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
+static void run_guest_then_process_rdmsr(struct kvm_vcpu *vcpu,
+ uint32_t msr_index)
{
- run_guest(vm);
- process_rdmsr(vm, msr_index);
+ vcpu_run(vcpu);
+ process_rdmsr(vcpu, msr_index);
}
-static void run_guest_then_process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
+static void run_guest_then_process_wrmsr(struct kvm_vcpu *vcpu,
+ uint32_t msr_index)
{
- run_guest(vm);
- process_wrmsr(vm, msr_index);
+ vcpu_run(vcpu);
+ process_wrmsr(vcpu, msr_index);
}
-static uint64_t run_guest_then_process_ucall(struct kvm_vm *vm)
+static uint64_t run_guest_then_process_ucall(struct kvm_vcpu *vcpu)
{
- run_guest(vm);
- return process_ucall(vm);
+ vcpu_run(vcpu);
+ return process_ucall(vcpu);
}
-static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
+static void run_guest_then_process_ucall_done(struct kvm_vcpu *vcpu)
{
- run_guest(vm);
- process_ucall_done(vm);
+ vcpu_run(vcpu);
+ process_ucall_done(vcpu);
}
-static void test_msr_filter_allow(void) {
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_USER_SPACE_MSR,
- .args[0] = KVM_MSR_EXIT_REASON_FILTER,
- };
+static void test_msr_filter_allow(void)
+{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int rc;
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code_filter_allow);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_allow);
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
@@ -572,43 +559,43 @@ static void test_msr_filter_allow(void) {
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
/* Process guest code userspace exits. */
- run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
- run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
- run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
+ run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
- run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
- run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
- run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
- run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
- run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
+ run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
+ run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
- run_guest(vm);
+ vcpu_run(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, NULL);
- if (process_ucall(vm) != UCALL_DONE) {
+ if (process_ucall(vcpu) != UCALL_DONE) {
vm_install_exception_handler(vm, GP_VECTOR, guest_fep_gp_handler);
/* Process emulated rdmsr and wrmsr instructions. */
- run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
- run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
- run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
+ run_guest_then_process_rdmsr(vcpu, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_XSS);
- run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
- run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
- run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_rdmsr(vcpu, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
+ run_guest_then_process_wrmsr(vcpu, MSR_IA32_FLUSH_CMD);
- run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
- run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);
+ run_guest_then_process_wrmsr(vcpu, MSR_NON_EXISTENT);
+ run_guest_then_process_rdmsr(vcpu, MSR_NON_EXISTENT);
/* Confirm the guest completed without issues. */
- run_guest_then_process_ucall_done(vm);
+ run_guest_then_process_ucall_done(vcpu);
} else {
printf("To run the instruction emulated tests set the module parameter 'kvm.force_emulation_prefix=1'\n");
}
@@ -616,16 +603,16 @@ static void test_msr_filter_allow(void) {
kvm_vm_free(vm);
}
-static int handle_ucall(struct kvm_vm *vm)
+static int handle_ucall(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("Guest assertion not met");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_SYNC:
- vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
+ vm_ioctl(vcpu->vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
break;
case UCALL_DONE:
return 1;
@@ -673,25 +660,21 @@ static void handle_wrmsr(struct kvm_run *run)
}
}
-static void test_msr_filter_deny(void) {
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_USER_SPACE_MSR,
- .args[0] = KVM_MSR_EXIT_REASON_INVAL |
- KVM_MSR_EXIT_REASON_UNKNOWN |
- KVM_MSR_EXIT_REASON_FILTER,
- };
+static void test_msr_filter_deny(void)
+{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
int rc;
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code_filter_deny);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_filter_deny);
+ run = vcpu->run;
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_INVAL |
+ KVM_MSR_EXIT_REASON_UNKNOWN |
+ KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
@@ -700,9 +683,7 @@ static void test_msr_filter_deny(void) {
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);
while (1) {
- rc = _vcpu_run(vm, VCPU_ID);
-
- TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+ vcpu_run(vcpu);
switch (run->exit_reason) {
case KVM_EXIT_X86_RDMSR:
@@ -712,7 +693,7 @@ static void test_msr_filter_deny(void) {
handle_wrmsr(run);
break;
case KVM_EXIT_IO:
- if (handle_ucall(vm))
+ if (handle_ucall(vcpu))
goto done;
break;
}
@@ -726,31 +707,28 @@ done:
kvm_vm_free(vm);
}
-static void test_msr_permission_bitmap(void) {
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_X86_USER_SPACE_MSR,
- .args[0] = KVM_MSR_EXIT_REASON_FILTER,
- };
+static void test_msr_permission_bitmap(void)
+{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
int rc;
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code_permission_bitmap);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_permission_bitmap);
rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR is available");
- vm_enable_cap(vm, &cap);
+ vm_enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER);
rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER is available");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
- run_guest_then_process_rdmsr(vm, MSR_FS_BASE);
- TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC, "Expected ucall state to be UCALL_SYNC.");
+ run_guest_then_process_rdmsr(vcpu, MSR_FS_BASE);
+ TEST_ASSERT(run_guest_then_process_ucall(vcpu) == UCALL_SYNC,
+ "Expected ucall state to be UCALL_SYNC.");
vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
- run_guest_then_process_rdmsr(vm, MSR_GS_BASE);
- run_guest_then_process_ucall_done(vm);
+ run_guest_then_process_rdmsr(vcpu, MSR_GS_BASE);
+ run_guest_then_process_ucall_done(vcpu);
kvm_vm_free(vm);
}
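
Alongside the vCPU handle conversion, this file shows vm_enable_cap() switching from a caller-built struct kvm_enable_cap to passing the capability and its argument directly. A plausible sketch of what such a helper wraps, written against the raw KVM ioctl interface (the helper name and body here are assumptions, not the library's actual implementation):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: what a vm_enable_cap(vm, cap, arg) style helper plausibly does. */
static int enable_cap(int vm_fd, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = {
		.cap = cap,
		.args[0] = arg0,	/* remaining args stay zeroed */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &enable_cap);
}

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm;

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}
	if (enable_cap(vm, KVM_CAP_X86_USER_SPACE_MSR, KVM_MSR_EXIT_REASON_FILTER))
		perror("KVM_ENABLE_CAP");
	return 0;
}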
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
index d438c4d3228a..5abecf06329e 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c
@@ -28,11 +28,6 @@
#include "kselftest.h"
-#define VCPU_ID 0
-
-/* The virtual machine object. */
-static struct kvm_vm *vm;
-
static void l2_guest_code(void)
{
/* Exit to L1 */
@@ -77,33 +72,29 @@ static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
int main(int argc, char *argv[])
{
unsigned long apic_access_addr = ~0ul;
- unsigned int paddr_width;
- unsigned int vaddr_width;
vm_vaddr_t vmx_pages_gva;
unsigned long high_gpa;
struct vmx_pages *vmx;
bool done = false;
- nested_vmx_check_supported();
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- kvm_get_cpu_address_width(&paddr_width, &vaddr_width);
- high_gpa = (1ul << paddr_width) - getpagesize();
- if ((unsigned long)DEFAULT_GUEST_PHY_PAGES * getpagesize() > high_gpa) {
- print_skip("No unbacked physical page available");
- exit(KSFT_SKIP);
- }
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+
+ high_gpa = (vm->max_gfn - 1) << vm->page_shift;
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
prepare_virtualize_apic_accesses(vmx, vm);
- vcpu_args_set(vm, VCPU_ID, 2, vmx_pages_gva, high_gpa);
+ vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);
while (!done) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
if (apic_access_addr == high_gpa) {
TEST_ASSERT(run->exit_reason ==
KVM_EXIT_INTERNAL_ERROR,
@@ -121,10 +112,9 @@ int main(int argc, char *argv[])
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
apic_access_addr = uc.args[1];
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
index edac8839e717..d79651b02740 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
@@ -18,15 +18,10 @@
#include "kselftest.h"
-#define VCPU_ID 5
-
enum {
PORT_L0_EXIT = 0x2000,
};
-/* The virtual machine object. */
-static struct kvm_vm *vm;
-
static void l2_guest_code(void)
{
/* Exit to L0 */
@@ -53,20 +48,22 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
- nested_vmx_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
@@ -75,9 +72,9 @@ int main(int argc, char *argv[])
if (run->io.port == PORT_L0_EXIT)
break;
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 68f26a8b4f42..2d8c23d639f7 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -17,8 +17,6 @@
#include "processor.h"
#include "vmx.h"
-#define VCPU_ID 1
-
/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX 1
#define TEST_MEM_PAGES 3
@@ -73,18 +71,19 @@ int main(int argc, char *argv[])
unsigned long *bmap;
uint64_t *host_test_mem;
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
struct kvm_run *run;
struct ucall uc;
bool done = false;
- nested_vmx_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
/* Create VM */
- vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
- run = vcpu_state(vm, VCPU_ID);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
+ run = vcpu->run;
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
@@ -116,16 +115,15 @@ int main(int argc, char *argv[])
while (!done) {
memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
- _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Unexpected exit reason: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
/*
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
index 27a850f3d7ce..2641b286b4ed 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
@@ -10,10 +10,6 @@
#include "kselftest.h"
-#define VCPU_ID 0
-
-static struct kvm_vm *vm;
-
static void guest_ud_handler(struct ex_regs *regs)
{
/* Loop on the ud2 until guest state is made invalid. */
@@ -24,11 +20,11 @@ static void guest_code(void)
asm volatile("ud2");
}
-static void __run_vcpu_with_invalid_state(void)
+static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct kvm_run *run = vcpu->run;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
"Expected KVM_EXIT_INTERNAL_ERROR, got %d (%s)\n",
@@ -38,15 +34,15 @@ static void __run_vcpu_with_invalid_state(void)
run->emulation_failure.suberror);
}
-static void run_vcpu_with_invalid_state(void)
+static void run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu)
{
/*
* Always run twice to verify KVM handles the case where _KVM_ queues
* an exception with invalid state and then exits to userspace, i.e.
* that KVM doesn't explode if userspace ignores the initial error.
*/
- __run_vcpu_with_invalid_state();
- __run_vcpu_with_invalid_state();
+ __run_vcpu_with_invalid_state(vcpu);
+ __run_vcpu_with_invalid_state(vcpu);
}
static void set_timer(void)
@@ -59,33 +55,43 @@ static void set_timer(void)
ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
}
-static void set_or_clear_invalid_guest_state(bool set)
+static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set)
{
static struct kvm_sregs sregs;
if (!sregs.cr0)
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = !!set;
- vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
+}
+
+static void set_invalid_guest_state(struct kvm_vcpu *vcpu)
+{
+ set_or_clear_invalid_guest_state(vcpu, true);
}
-static void set_invalid_guest_state(void)
+static void clear_invalid_guest_state(struct kvm_vcpu *vcpu)
{
- set_or_clear_invalid_guest_state(true);
+ set_or_clear_invalid_guest_state(vcpu, false);
}
-static void clear_invalid_guest_state(void)
+static struct kvm_vcpu *get_set_sigalrm_vcpu(struct kvm_vcpu *__vcpu)
{
- set_or_clear_invalid_guest_state(false);
+ static struct kvm_vcpu *vcpu = NULL;
+
+ if (__vcpu)
+ vcpu = __vcpu;
+ return vcpu;
}
static void sigalrm_handler(int sig)
{
+ struct kvm_vcpu *vcpu = get_set_sigalrm_vcpu(NULL);
struct kvm_vcpu_events events;
TEST_ASSERT(sig == SIGALRM, "Unexpected signal = %d", sig);
- vcpu_events_get(vm, VCPU_ID, &events);
+ vcpu_events_get(vcpu, &events);
/*
* If an exception is pending, attempt KVM_RUN with invalid guest,
@@ -93,8 +99,8 @@ static void sigalrm_handler(int sig)
* between KVM queueing an exception and re-entering the guest.
*/
if (events.exception.pending) {
- set_invalid_guest_state();
- run_vcpu_with_invalid_state();
+ set_invalid_guest_state(vcpu);
+ run_vcpu_with_invalid_state(vcpu);
} else {
set_timer();
}
@@ -102,15 +108,17 @@ static void sigalrm_handler(int sig)
int main(int argc, char *argv[])
{
- if (!is_intel_cpu() || vm_is_unrestricted_guest(NULL)) {
- print_skip("Must be run with kvm_intel.unrestricted_guest=0");
- exit(KSFT_SKIP);
- }
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ TEST_REQUIRE(is_intel_cpu());
+ TEST_REQUIRE(!vm_is_unrestricted_guest(NULL));
- vm = vm_create_default(VCPU_ID, 0, (void *)guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ get_set_sigalrm_vcpu(vcpu);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
@@ -119,8 +127,8 @@ int main(int argc, char *argv[])
* KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support
* emulating invalid guest state for L2.
*/
- set_invalid_guest_state();
- run_vcpu_with_invalid_state();
+ set_invalid_guest_state(vcpu);
+ run_vcpu_with_invalid_state(vcpu);
/*
* Verify KVM also handles the case where userspace gains control while
@@ -129,11 +137,11 @@ int main(int argc, char *argv[])
* guest with invalid state when the handler interrupts KVM with an
* exception pending.
*/
- clear_invalid_guest_state();
+ clear_invalid_guest_state(vcpu);
TEST_ASSERT(signal(SIGALRM, sigalrm_handler) != SIG_ERR,
"Failed to register SIGALRM handler, errno = %d (%s)",
errno, strerror(errno));
set_timer();
- run_vcpu_with_invalid_state();
+ run_vcpu_with_invalid_state(vcpu);
}
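
With the file-scope vm variable gone, sigalrm_handler() needs another path to the vCPU, so get_set_sigalrm_vcpu() stashes the pointer in a function-local static: pass a non-NULL pointer to store it, NULL to retrieve it. The same accessor pattern in isolation (all names here are illustrative):

#include <signal.h>
#include <stddef.h>

struct handle { int fd; };

/* Set-or-get accessor: pass a handle to stash it, NULL to retrieve it. */
static struct handle *stash_handle(struct handle *__h)
{
	static struct handle *h = NULL;

	if (__h)
		h = __h;
	return h;
}

static void on_alarm(int sig)
{
	/* Retrieve the stashed handle inside the signal handler. */
	struct handle *h = stash_handle(NULL);

	(void)sig;
	(void)h;	/* ... use h here ... */
}

int main(void)
{
	struct handle real = { .fd = 42 };

	stash_handle(&real);		/* stash before arming the signal */
	signal(SIGALRM, on_alarm);
	return 0;
}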
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
index 489fbed4ca6f..6bfb4bb471ca 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
@@ -9,7 +9,6 @@
#include "kselftest.h"
-#define VCPU_ID 0
#define ARBITRARY_IO_PORT 0x2000
static struct kvm_vm *vm;
@@ -55,20 +54,21 @@ int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
struct kvm_sregs sregs;
+ struct kvm_vcpu *vcpu;
struct kvm_run *run;
struct ucall uc;
- nested_vmx_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- run = vcpu_state(vm, VCPU_ID);
+ run = vcpu->run;
/*
* The first exit to L0 userspace should be an I/O access from L2.
@@ -88,17 +88,17 @@ int main(int argc, char *argv[])
* emulating invalid guest state for L2.
*/
memset(&sregs, 0, sizeof(sregs));
- vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ vcpu_sregs_get(vcpu, &sregs);
sregs.tr.unusable = 1;
- vcpu_sregs_set(vm, VCPU_ID, &sregs);
+ vcpu_sregs_set(vcpu, &sregs);
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
break;
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c
new file mode 100644
index 000000000000..322d561b4260
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VMX control MSR test
+ *
+ * Copyright (C) 2022 Google LLC.
+ *
+ * Tests for KVM ownership of bits in the VMX entry/exit control MSRs. Checks
+ * that KVM will set owned bits where appropriate, and will not if
+ * KVM_X86_QUIRK_TWEAK_VMX_CTRL_MSRS is disabled.
+ */
+#include <linux/bitmap.h>
+#include "kvm_util.h"
+#include "vmx.h"
+
+static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
+ uint64_t mask)
+{
+ uint64_t val = vcpu_get_msr(vcpu, msr_index);
+ uint64_t bit;
+
+ mask &= val;
+
+ for_each_set_bit(bit, &mask, 64) {
+ vcpu_set_msr(vcpu, msr_index, val & ~BIT_ULL(bit));
+ vcpu_set_msr(vcpu, msr_index, val);
+ }
+}
+
+static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
+ uint64_t mask)
+{
+ uint64_t val = vcpu_get_msr(vcpu, msr_index);
+ uint64_t bit;
+
+ mask = ~mask | val;
+
+ for_each_clear_bit(bit, &mask, 64) {
+ vcpu_set_msr(vcpu, msr_index, val | BIT_ULL(bit));
+ vcpu_set_msr(vcpu, msr_index, val);
+ }
+}
+
+static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index)
+{
+ vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0));
+ vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32));
+}
+
+static void vmx_save_restore_msrs_test(struct kvm_vcpu *vcpu)
+{
+ vcpu_set_msr(vcpu, MSR_IA32_VMX_VMCS_ENUM, 0);
+ vcpu_set_msr(vcpu, MSR_IA32_VMX_VMCS_ENUM, -1ull);
+
+ vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_BASIC,
+ BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55));
+
+ vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_MISC,
+ BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) |
+ BIT_ULL(15) | BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30));
+
+ vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2);
+ vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_EPT_VPID_CAP, -1ull);
+ vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS);
+ vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
+ vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_EXIT_CTLS);
+ vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS);
+ vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_VMFUNC, -1ull);
+}
+
+int main(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_DISABLE_QUIRKS2));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+
+ /* No need to actually do KVM_RUN, thus no guest code. */
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+ vmx_save_restore_msrs_test(vcpu);
+
+ kvm_vm_free(vm);
+}
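
The fixed0/fixed1 split above follows the VMX capability MSR convention: for the entry/exit control MSRs, the low 32 bits advertise the allowed-0 settings (a bit set there must also be set in the control) and the high 32 bits advertise the allowed-1 settings (a bit clear there must stay clear). A small sketch of decoding such an MSR value (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical IA32_VMX_TRUE_*_CTLS style value, made up for the sketch. */
	uint64_t msr_val = 0x0000d1ff000011ffull;
	uint32_t must_be_one = (uint32_t)msr_val;		/* allowed-0 settings */
	uint32_t may_be_one = (uint32_t)(msr_val >> 32);	/* allowed-1 settings */

	/* A control value is legal iff every must-be-one bit is set and
	 * no bit outside may_be_one is set; must_be_one itself is the
	 * minimal legal value. */
	uint32_t ctl = must_be_one;

	printf("must-be-1: %#x, may-be-1: %#x, minimal legal control: %#x\n",
	       must_be_one, may_be_one, ctl);
	return 0;
}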
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
index 280c01fd2412..465a9434d61c 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
@@ -15,9 +15,6 @@
#include "vmx.h"
#include "kselftest.h"
-
-#define VCPU_ID 0
-
/* L2 is scaled up (from L1's perspective) by this factor */
#define L2_SCALE_FACTOR 4ULL
@@ -119,14 +116,6 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
-static void tsc_scaling_check_supported(void)
-{
- if (!kvm_check_cap(KVM_CAP_TSC_CONTROL)) {
- print_skip("TSC scaling not supported by the HW");
- exit(KSFT_SKIP);
- }
-}
-
static void stable_tsc_check_supported(void)
{
FILE *fp;
@@ -150,6 +139,7 @@ skip_test:
int main(int argc, char *argv[])
{
+ struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm_vaddr_t vmx_pages_gva;
@@ -160,8 +150,8 @@ int main(int argc, char *argv[])
uint64_t l1_tsc_freq = 0;
uint64_t l2_tsc_freq = 0;
- nested_vmx_check_supported();
- tsc_scaling_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
stable_tsc_check_supported();
/*
@@ -182,30 +172,29 @@ int main(int argc, char *argv[])
l0_tsc_freq = tsc_end - tsc_start;
printf("real TSC frequency is around: %"PRIu64"\n", l0_tsc_freq);
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
- tsc_khz = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_TSC_KHZ, NULL);
+ tsc_khz = __vcpu_ioctl(vcpu, KVM_GET_TSC_KHZ, NULL);
TEST_ASSERT(tsc_khz != -1, "vcpu ioctl KVM_GET_TSC_KHZ failed");
/* scale down L1's TSC frequency */
- vcpu_ioctl(vm, VCPU_ID, KVM_SET_TSC_KHZ,
- (void *) (tsc_khz / l1_scale_factor));
+ vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *) (tsc_khz / l1_scale_factor));
for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *) uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
case UCALL_SYNC:
switch (uc.args[0]) {
case USLEEP:
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
new file mode 100644
index 000000000000..6ec901dab61e
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for VMX-pmu perf capability msr
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ * Test to check the effect of various CPUID settings on the
+ * MSR_IA32_PERF_CAPABILITIES MSR, check that whatever we write with
+ * KVM_SET_MSR is _not_ modified by the guest and can be retrieved
+ * with KVM_GET_MSR, and test that invalid LBR formats are rejected.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <sys/ioctl.h>
+
+#include "kvm_util.h"
+#include "vmx.h"
+
+#define PMU_CAP_FW_WRITES (1ULL << 13)
+#define PMU_CAP_LBR_FMT 0x3f
+
+union cpuid10_eax {
+ struct {
+ unsigned int version_id:8;
+ unsigned int num_counters:8;
+ unsigned int bit_width:8;
+ unsigned int mask_length:8;
+ } split;
+ unsigned int full;
+};
+
+union perf_capabilities {
+ struct {
+ u64 lbr_format:6;
+ u64 pebs_trap:1;
+ u64 pebs_arch_reg:1;
+ u64 pebs_format:4;
+ u64 smm_freeze:1;
+ u64 full_width_write:1;
+ u64 pebs_baseline:1;
+ u64 perf_metrics:1;
+ u64 pebs_output_pt_available:1;
+ u64 anythread_deprecated:1;
+ };
+ u64 capabilities;
+};
+
+static void guest_code(void)
+{
+ wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
+}
+
+int main(int argc, char *argv[])
+{
+ const struct kvm_cpuid_entry2 *entry_a_0;
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ int ret;
+ union cpuid10_eax eax;
+ union perf_capabilities host_cap;
+
+ host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
+ host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT);
+
+ /* Create VM */
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
+
+ TEST_REQUIRE(kvm_get_cpuid_max_basic() >= 0xa);
+ entry_a_0 = kvm_get_supported_cpuid_entry(0xa);
+
+ eax.full = entry_a_0->eax;
+ __TEST_REQUIRE(eax.split.version_id, "PMU is not supported by the vCPU");
+
+ /* testcase 1, set capabilities when we have PDCM bit */
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
+
+ /* check capabilities can be retrieved with KVM_GET_MSR */
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+
+ /* check whatever we write with KVM_SET_MSR is _not_ modified */
+ vcpu_run(vcpu);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
+
+ /* testcase 2, check valid LBR formats are accepted */
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), 0);
+
+ vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
+ ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
+
+ /* testcase 3, check invalid LBR format is rejected */
+ /*
+ * Note: on Arch LBR capable platforms, the LBR format value in the
+ * perf capabilities MSR is 0x3f, so PMU_CAP_LBR_FMT would be accepted
+ * there. Use a truly invalid format, 0x30, for the test instead.
+ */
+ ret = _vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, 0x30);
+ TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
+
+ printf("Completed perf capability tests.\n");
+ kvm_vm_free(vm);
+}
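
For reference, PMU_CAP_LBR_FMT (0x3f) masks the low six LBR-format bits of IA32_PERF_CAPABILITIES, and PMU_CAP_FW_WRITES is bit 13, full-width counter writes. A small sketch of pulling those fields out of a raw MSR value (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

#define PMU_CAP_FW_WRITES	(1ULL << 13)	/* full-width counter writes */
#define PMU_CAP_LBR_FMT		0x3fULL		/* bits 5:0, LBR format */

int main(void)
{
	uint64_t caps = 0x2005ULL;	/* hypothetical IA32_PERF_CAPABILITIES */

	printf("LBR format: %#llx\n",
	       (unsigned long long)(caps & PMU_CAP_LBR_FMT));
	printf("full-width writes: %s\n",
	       (caps & PMU_CAP_FW_WRITES) ? "yes" : "no");
	return 0;
}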
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
deleted file mode 100644
index 2454a1f2ca0c..000000000000
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * VMX-pmu related msrs test
- *
- * Copyright (C) 2021 Intel Corporation
- *
- * Test to check the effect of various CPUID settings
- * on the MSR_IA32_PERF_CAPABILITIES MSR, and check that
- * whatever we write with KVM_SET_MSR is _not_ modified
- * in the guest and test it can be retrieved with KVM_GET_MSR.
- *
- * Test to check that invalid LBR formats are rejected.
- */
-
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <sys/ioctl.h>
-
-#include "kvm_util.h"
-#include "vmx.h"
-
-#define VCPU_ID 0
-
-#define X86_FEATURE_PDCM (1<<15)
-#define PMU_CAP_FW_WRITES (1ULL << 13)
-#define PMU_CAP_LBR_FMT 0x3f
-
-union cpuid10_eax {
- struct {
- unsigned int version_id:8;
- unsigned int num_counters:8;
- unsigned int bit_width:8;
- unsigned int mask_length:8;
- } split;
- unsigned int full;
-};
-
-union perf_capabilities {
- struct {
- u64 lbr_format:6;
- u64 pebs_trap:1;
- u64 pebs_arch_reg:1;
- u64 pebs_format:4;
- u64 smm_freeze:1;
- u64 full_width_write:1;
- u64 pebs_baseline:1;
- u64 perf_metrics:1;
- u64 pebs_output_pt_available:1;
- u64 anythread_deprecated:1;
- };
- u64 capabilities;
-};
-
-static void guest_code(void)
-{
- wrmsr(MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
-}
-
-int main(int argc, char *argv[])
-{
- struct kvm_cpuid2 *cpuid;
- struct kvm_cpuid_entry2 *entry_1_0;
- struct kvm_cpuid_entry2 *entry_a_0;
- bool pdcm_supported = false;
- struct kvm_vm *vm;
- int ret;
- union cpuid10_eax eax;
- union perf_capabilities host_cap;
-
- host_cap.capabilities = kvm_get_feature_msr(MSR_IA32_PERF_CAPABILITIES);
- host_cap.capabilities &= (PMU_CAP_FW_WRITES | PMU_CAP_LBR_FMT);
-
- /* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- cpuid = kvm_get_supported_cpuid();
-
- if (kvm_get_cpuid_max_basic() >= 0xa) {
- entry_1_0 = kvm_get_supported_cpuid_index(1, 0);
- entry_a_0 = kvm_get_supported_cpuid_index(0xa, 0);
- pdcm_supported = entry_1_0 && !!(entry_1_0->ecx & X86_FEATURE_PDCM);
- eax.full = entry_a_0->eax;
- }
- if (!pdcm_supported) {
- print_skip("MSR_IA32_PERF_CAPABILITIES is not supported by the vCPU");
- exit(KSFT_SKIP);
- }
- if (!eax.split.version_id) {
- print_skip("PMU is not supported by the vCPU");
- exit(KSFT_SKIP);
- }
-
- /* testcase 1, set capabilities when we have PDCM bit */
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
- vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
-
- /* check capabilities can be retrieved with KVM_GET_MSR */
- ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
-
- /* check whatever we write with KVM_SET_MSR is _not_ modified */
- vcpu_run(vm, VCPU_ID);
- ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), PMU_CAP_FW_WRITES);
-
- /* testcase 2, check valid LBR formats are accepted */
- vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0);
- ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);
-
- vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.lbr_format);
- ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), (u64)host_cap.lbr_format);
-
- /* testcase 3, check invalid LBR format is rejected */
- ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
- TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
- kvm_vm_free(vm);
-}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
index ff92e25b6f1e..0efdc05969a5 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
@@ -22,7 +22,6 @@
#include "processor.h"
#include "vmx.h"
-#define VCPU_ID 5
#define PREEMPTION_TIMER_VALUE 100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1 80000000ull
@@ -159,6 +158,7 @@ int main(int argc, char *argv[])
struct kvm_regs regs1, regs2;
struct kvm_vm *vm;
struct kvm_run *run;
+ struct kvm_vcpu *vcpu;
struct kvm_x86_state *state;
struct ucall uc;
int stage;
@@ -167,33 +167,29 @@ int main(int argc, char *argv[])
* AMD currently does not implement any VMX features, so for now we
* just early out.
*/
- nested_vmx_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- print_skip("KVM_CAP_NESTED_STATE not supported");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
/* Create VM */
- vm = vm_create_default(VCPU_ID, 0, guest_code);
- run = vcpu_state(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ vcpu_regs_get(vcpu, &regs1);
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (stage = 1;; stage++) {
- _vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Stage %d: unexpected exit reason: %u (%s),\n",
stage, run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
- __FILE__, uc.args[1]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
@@ -232,22 +228,20 @@ int main(int argc, char *argv[])
stage, uc.args[4], uc.args[5]);
}
- state = vcpu_save_state(vm, VCPU_ID);
+ state = vcpu_save_state(vcpu);
memset(&regs1, 0, sizeof(regs1));
- vcpu_regs_get(vm, VCPU_ID, &regs1);
+ vcpu_regs_get(vcpu, &regs1);
kvm_vm_release(vm);
/* Restore state in a new VM. */
- kvm_vm_restart(vm, O_RDWR);
- vm_vcpu_add(vm, VCPU_ID);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
- vcpu_load_state(vm, VCPU_ID, state);
- run = vcpu_state(vm, VCPU_ID);
+ vcpu = vm_recreate_with_one_vcpu(vm);
+ vcpu_load_state(vcpu, state);
+ run = vcpu->run;
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
- vcpu_regs_get(vm, VCPU_ID, &regs2);
+ vcpu_regs_get(vcpu, &regs2);
TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
"Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
(ulong) regs2.rdi, (ulong) regs2.rsi);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index 5827b9bae468..41ea7028a1f8 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -23,38 +23,37 @@
* changes this should be updated.
*/
#define VMCS12_REVISION 0x11e57ed0
-#define VCPU_ID 5
bool have_evmcs;
-void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
+void test_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state *state)
{
- vcpu_nested_state_set(vm, VCPU_ID, state, false);
+ vcpu_nested_state_set(vcpu, state);
}
-void test_nested_state_expect_errno(struct kvm_vm *vm,
+void test_nested_state_expect_errno(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state,
int expected_errno)
{
int rv;
- rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
+ rv = __vcpu_nested_state_set(vcpu, state);
TEST_ASSERT(rv == -1 && errno == expected_errno,
"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
strerror(expected_errno), expected_errno, rv, strerror(errno),
errno);
}
-void test_nested_state_expect_einval(struct kvm_vm *vm,
+void test_nested_state_expect_einval(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state)
{
- test_nested_state_expect_errno(vm, state, EINVAL);
+ test_nested_state_expect_errno(vcpu, state, EINVAL);
}
-void test_nested_state_expect_efault(struct kvm_vm *vm,
+void test_nested_state_expect_efault(struct kvm_vcpu *vcpu,
struct kvm_nested_state *state)
{
- test_nested_state_expect_errno(vm, state, EFAULT);
+ test_nested_state_expect_errno(vcpu, state, EFAULT);
}
void set_revision_id_for_vmcs12(struct kvm_nested_state *state,
@@ -86,7 +85,7 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
set_revision_id_for_vmcs12(state, VMCS12_REVISION);
}
-void test_vmx_nested_state(struct kvm_vm *vm)
+void test_vmx_nested_state(struct kvm_vcpu *vcpu)
{
/* Add a page for VMCS12. */
const int state_sz = sizeof(struct kvm_nested_state) + getpagesize();
@@ -96,14 +95,14 @@ void test_vmx_nested_state(struct kvm_vm *vm)
/* The format must be set to 0. 0 for VMX, 1 for SVM. */
set_default_vmx_state(state, state_sz);
state->format = 1;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/*
* We cannot virtualize anything if the guest does not have VMX
* enabled.
*/
set_default_vmx_state(state, state_sz);
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/*
* We cannot virtualize anything if the guest does not have VMX
@@ -112,17 +111,17 @@ void test_vmx_nested_state(struct kvm_vm *vm)
*/
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = KVM_STATE_NESTED_EVMCS;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
state->flags = 0;
- test_nested_state(vm, state);
+ test_nested_state(vcpu, state);
/* Enable VMX in the guest CPUID. */
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vcpu_set_cpuid_feature(vcpu, X86_FEATURE_VMX);
/*
* Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
@@ -133,34 +132,34 @@ void test_vmx_nested_state(struct kvm_vm *vm)
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmcs12_pa = -1ull;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
state->flags &= KVM_STATE_NESTED_EVMCS;
if (have_evmcs) {
- test_nested_state_expect_einval(vm, state);
- vcpu_enable_evmcs(vm, VCPU_ID);
+ test_nested_state_expect_einval(vcpu, state);
+ vcpu_enable_evmcs(vcpu);
}
- test_nested_state(vm, state);
+ test_nested_state(vcpu, state);
/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
state->hdr.vmx.smm.flags = 1;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/* Invalid flags are rejected. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.flags = ~0;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = -1ull;
state->flags = 0;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/* It is invalid to have vmxon_pa set to a non-page aligned address. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = 1;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/*
* It is invalid to have KVM_STATE_NESTED_SMM_GUEST_MODE and
@@ -170,7 +169,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
state->flags = KVM_STATE_NESTED_GUEST_MODE |
KVM_STATE_NESTED_RUN_PENDING;
state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/*
* It is invalid to have any of the SMM flags set besides:
@@ -180,13 +179,13 @@ void test_vmx_nested_state(struct kvm_vm *vm)
set_default_vmx_state(state, state_sz);
state->hdr.vmx.smm.flags = ~(KVM_STATE_NESTED_SMM_GUEST_MODE |
KVM_STATE_NESTED_SMM_VMXON);
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/* Outside SMM, SMM flags must be zero. */
set_default_vmx_state(state, state_sz);
state->flags = 0;
state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/*
* Size must be large enough to fit kvm_nested_state and vmcs12
@@ -195,13 +194,13 @@ void test_vmx_nested_state(struct kvm_vm *vm)
set_default_vmx_state(state, state_sz);
state->size = sizeof(*state);
state->flags = 0;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
set_default_vmx_state(state, state_sz);
state->size = sizeof(*state);
state->flags = 0;
state->hdr.vmx.vmcs12_pa = -1;
- test_nested_state(vm, state);
+ test_nested_state(vcpu, state);
/*
* KVM_SET_NESTED_STATE succeeds with invalid VMCS
@@ -209,7 +208,7 @@ void test_vmx_nested_state(struct kvm_vm *vm)
*/
set_default_vmx_state(state, state_sz);
state->flags = 0;
- test_nested_state(vm, state);
+ test_nested_state(vcpu, state);
/* Invalid flags are rejected, even if no VMCS loaded. */
set_default_vmx_state(state, state_sz);
@@ -217,13 +216,13 @@ void test_vmx_nested_state(struct kvm_vm *vm)
state->flags = 0;
state->hdr.vmx.vmcs12_pa = -1;
state->hdr.vmx.flags = ~0;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/* vmxon_pa cannot be the same address as vmcs_pa. */
set_default_vmx_state(state, state_sz);
state->hdr.vmx.vmxon_pa = 0;
state->hdr.vmx.vmcs12_pa = 0;
- test_nested_state_expect_einval(vm, state);
+ test_nested_state_expect_einval(vcpu, state);
/*
* Test that if we leave nesting the state reflects that when we get
@@ -233,8 +232,8 @@ void test_vmx_nested_state(struct kvm_vm *vm)
state->hdr.vmx.vmxon_pa = -1ull;
state->hdr.vmx.vmcs12_pa = -1ull;
state->flags = 0;
- test_nested_state(vm, state);
- vcpu_nested_state_get(vm, VCPU_ID, state);
+ test_nested_state(vcpu, state);
+ vcpu_nested_state_get(vcpu, state);
TEST_ASSERT(state->size >= sizeof(*state) && state->size <= state_sz,
"Size must be between %ld and %d. The size returned was %d.",
sizeof(*state), state_sz, state->size);
@@ -244,54 +243,36 @@ void test_vmx_nested_state(struct kvm_vm *vm)
free(state);
}
-void disable_vmx(struct kvm_vm *vm)
-{
- struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
- int i;
-
- for (i = 0; i < cpuid->nent; ++i)
- if (cpuid->entries[i].function == 1 &&
- cpuid->entries[i].index == 0)
- break;
- TEST_ASSERT(i != cpuid->nent, "CPUID function 1 not found");
-
- cpuid->entries[i].ecx &= ~CPUID_VMX;
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
- cpuid->entries[i].ecx |= CPUID_VMX;
-}
-
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
struct kvm_nested_state state;
+ struct kvm_vcpu *vcpu;
have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
- if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
- print_skip("KVM_CAP_NESTED_STATE not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
/*
* AMD currently does not implement set_nested_state, so for now we
* just early out.
*/
- nested_vmx_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- vm = vm_create_default(VCPU_ID, 0, 0);
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
/*
* First run tests with VMX disabled to check error handling.
*/
- disable_vmx(vm);
+ vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_VMX);
/* Passing a NULL kvm_nested_state causes a EFAULT. */
- test_nested_state_expect_efault(vm, NULL);
+ test_nested_state_expect_efault(vcpu, NULL);
/* 'size' cannot be smaller than sizeof(kvm_nested_state). */
set_default_state(&state);
state.size = 0;
- test_nested_state_expect_einval(vm, &state);
+ test_nested_state_expect_einval(vcpu, &state);
/*
* Setting the flags 0xf fails the flags check. The only flags that
@@ -302,7 +283,7 @@ int main(int argc, char *argv[])
*/
set_default_state(&state);
state.flags = 0xf;
- test_nested_state_expect_einval(vm, &state);
+ test_nested_state_expect_einval(vcpu, &state);
/*
* If KVM_STATE_NESTED_RUN_PENDING is set then
@@ -310,9 +291,9 @@ int main(int argc, char *argv[])
*/
set_default_state(&state);
state.flags = KVM_STATE_NESTED_RUN_PENDING;
- test_nested_state_expect_einval(vm, &state);
+ test_nested_state_expect_einval(vcpu, &state);
- test_vmx_nested_state(vm);
+ test_vmx_nested_state(vcpu);
kvm_vm_free(vm);
return 0;
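
The conversion above is the template for the whole series: the (vm, VCPU_ID) pairs collapse into a single struct kvm_vcpu handle returned by vm_create_with_one_vcpu(), capability and feature probes become TEST_REQUIRE(), and CPUID bits are toggled per-vCPU by feature name. A minimal sketch of the resulting boilerplate, assuming only the helpers visible in this diff:

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Skip (rather than fail) when the kernel or CPU lacks support. */
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	/* One call creates the VM and its single vCPU; no VCPU_ID needed. */
	vm = vm_create_with_one_vcpu(&vcpu, NULL);

	/* CPUID features are now toggled per-vCPU by feature name. */
	vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_VMX);

	kvm_vm_free(vm);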
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 19b35c607dc6..5943187e8594 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -32,8 +32,6 @@
#define MSR_IA32_TSC_ADJUST 0x3b
#endif
-#define VCPU_ID 5
-
#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)
@@ -127,28 +125,29 @@ static void report(int64_t val)
int main(int argc, char *argv[])
{
vm_vaddr_t vmx_pages_gva;
+ struct kvm_vcpu *vcpu;
- nested_vmx_check_supported();
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
- vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);
/* Allocate VMX pages and shared descriptors (vmx_pages). */
vcpu_alloc_vmx(vm, &vmx_pages_gva);
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ vcpu_args_set(vcpu, 1, vmx_pages_gva);
for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
report(uc.args[1]);
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
index afbbc40df884..3d272d7f961e 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c
@@ -39,9 +39,6 @@
/* Default delay between migrate_pages calls (microseconds) */
#define DEFAULT_DELAY_USECS 500000
-#define HALTER_VCPU_ID 0
-#define SENDER_VCPU_ID 1
-
/*
* Vector for IPI from sender vCPU to halting vCPU.
* Value is arbitrary and was chosen for the alternating bit pattern. Any
@@ -79,8 +76,7 @@ struct test_data_page {
struct thread_params {
struct test_data_page *data;
- struct kvm_vm *vm;
- uint32_t vcpu_id;
+ struct kvm_vcpu *vcpu;
uint64_t *pipis_rcvd; /* host address of ipis_rcvd global */
};
@@ -198,6 +194,7 @@ static void sender_guest_code(struct test_data_page *data)
static void *vcpu_thread(void *arg)
{
struct thread_params *params = (struct thread_params *)arg;
+ struct kvm_vcpu *vcpu = params->vcpu;
struct ucall uc;
int old;
int r;
@@ -206,17 +203,17 @@ static void *vcpu_thread(void *arg)
r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
TEST_ASSERT(r == 0,
"pthread_setcanceltype failed on vcpu_id=%u with errno=%d",
- params->vcpu_id, r);
+ vcpu->id, r);
- fprintf(stderr, "vCPU thread running vCPU %u\n", params->vcpu_id);
- vcpu_run(params->vm, params->vcpu_id);
- exit_reason = vcpu_state(params->vm, params->vcpu_id)->exit_reason;
+ fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id);
+ vcpu_run(vcpu);
+ exit_reason = vcpu->run->exit_reason;
TEST_ASSERT(exit_reason == KVM_EXIT_IO,
"vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO",
- params->vcpu_id, exit_reason, exit_reason_str(exit_reason));
+ vcpu->id, exit_reason, exit_reason_str(exit_reason));
- if (get_ucall(params->vm, params->vcpu_id, &uc) == UCALL_ABORT) {
+ if (get_ucall(vcpu, &uc) == UCALL_ABORT) {
TEST_ASSERT(false,
"vCPU %u exited with error: %s.\n"
"Sending vCPU sent %lu IPIs to halting vCPU\n"
@@ -224,7 +221,7 @@ static void *vcpu_thread(void *arg)
"Halter TPR=%#x PPR=%#x LVR=%#x\n"
"Migrations attempted: %lu\n"
"Migrations completed: %lu\n",
- params->vcpu_id, (const char *)uc.args[0],
+ vcpu->id, (const char *)uc.args[0],
params->data->ipis_sent, params->data->hlt_count,
params->data->wake_count,
*params->pipis_rcvd, params->data->halter_tpr,
@@ -236,7 +233,7 @@ static void *vcpu_thread(void *arg)
return NULL;
}
-static void cancel_join_vcpu_thread(pthread_t thread, uint32_t vcpu_id)
+static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
{
void *retval;
int r;
@@ -244,12 +241,12 @@ static void cancel_join_vcpu_thread(pthread_t thread, uint32_t vcpu_id)
r = pthread_cancel(thread);
TEST_ASSERT(r == 0,
"pthread_cancel on vcpu_id=%d failed with errno=%d",
- vcpu_id, r);
+ vcpu->id, r);
r = pthread_join(thread, &retval);
TEST_ASSERT(r == 0,
"pthread_join on vcpu_id=%d failed with errno=%d",
- vcpu_id, r);
+ vcpu->id, r);
TEST_ASSERT(retval == PTHREAD_CANCELED,
"expected retval=%p, got %p", PTHREAD_CANCELED,
retval);
@@ -415,34 +412,30 @@ int main(int argc, char *argv[])
if (delay_usecs <= 0)
delay_usecs = DEFAULT_DELAY_USECS;
- vm = vm_create_default(HALTER_VCPU_ID, 0, halter_guest_code);
- params[0].vm = vm;
- params[1].vm = vm;
+ vm = vm_create_with_one_vcpu(&params[0].vcpu, halter_guest_code);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, HALTER_VCPU_ID);
+ vcpu_init_descriptor_tables(params[0].vcpu);
vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
- vm_vcpu_add_default(vm, SENDER_VCPU_ID, sender_guest_code);
+ params[1].vcpu = vm_vcpu_add(vm, 1, sender_guest_code);
test_data_page_vaddr = vm_vaddr_alloc_page(vm);
- data =
- (struct test_data_page *)addr_gva2hva(vm, test_data_page_vaddr);
+ data = addr_gva2hva(vm, test_data_page_vaddr);
memset(data, 0, sizeof(*data));
params[0].data = data;
params[1].data = data;
- vcpu_args_set(vm, HALTER_VCPU_ID, 1, test_data_page_vaddr);
- vcpu_args_set(vm, SENDER_VCPU_ID, 1, test_data_page_vaddr);
+ vcpu_args_set(params[0].vcpu, 1, test_data_page_vaddr);
+ vcpu_args_set(params[1].vcpu, 1, test_data_page_vaddr);
pipis_rcvd = (uint64_t *)addr_gva2hva(vm, (uint64_t)&ipis_rcvd);
params[0].pipis_rcvd = pipis_rcvd;
params[1].pipis_rcvd = pipis_rcvd;
/* Start halter vCPU thread and wait for it to execute first HLT. */
- params[0].vcpu_id = HALTER_VCPU_ID;
r = pthread_create(&threads[0], NULL, vcpu_thread, &params[0]);
TEST_ASSERT(r == 0,
"pthread_create halter failed errno=%d", errno);
@@ -462,7 +455,6 @@ int main(int argc, char *argv[])
"Halter vCPU thread reported its APIC ID: %u after %d seconds.\n",
data->halter_apic_id, wait_secs);
- params[1].vcpu_id = SENDER_VCPU_ID;
r = pthread_create(&threads[1], NULL, vcpu_thread, &params[1]);
TEST_ASSERT(r == 0, "pthread_create sender failed errno=%d", errno);
@@ -478,8 +470,8 @@ int main(int argc, char *argv[])
/*
* Cancel threads and wait for them to stop.
*/
- cancel_join_vcpu_thread(threads[0], HALTER_VCPU_ID);
- cancel_join_vcpu_thread(threads[1], SENDER_VCPU_ID);
+ cancel_join_vcpu_thread(threads[0], params[0].vcpu);
+ cancel_join_vcpu_thread(threads[1], params[1].vcpu);
fprintf(stderr,
"Test successful after running for %d seconds.\n"
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
index 0792334ba243..6f7a5ef66718 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
@@ -11,8 +11,8 @@
#include "processor.h"
#include "test_util.h"
-struct kvm_vcpu {
- uint32_t id;
+struct xapic_vcpu {
+ struct kvm_vcpu *vcpu;
bool is_x2apic;
};
@@ -47,8 +47,9 @@ static void x2apic_guest_code(void)
} while (1);
}
-static void ____test_icr(struct kvm_vm *vm, struct kvm_vcpu *vcpu, uint64_t val)
+static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
{
+ struct kvm_vcpu *vcpu = x->vcpu;
struct kvm_lapic_state xapic;
struct ucall uc;
uint64_t icr;
@@ -58,40 +59,55 @@ static void ____test_icr(struct kvm_vm *vm, struct kvm_vcpu *vcpu, uint64_t val)
* all bits are valid and should not be modified by KVM (ignoring the
* fact that vectors 0-15 are technically illegal).
*/
- vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic);
+ vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
*((u32 *)&xapic.regs[APIC_IRR]) = val;
*((u32 *)&xapic.regs[APIC_IRR + 0x10]) = val >> 32;
- vcpu_ioctl(vm, vcpu->id, KVM_SET_LAPIC, &xapic);
+ vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
- vcpu_run(vm, vcpu->id);
- ASSERT_EQ(get_ucall(vm, vcpu->id, &uc), UCALL_SYNC);
+ vcpu_run(vcpu);
+ ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
ASSERT_EQ(uc.args[1], val);
- vcpu_ioctl(vm, vcpu->id, KVM_GET_LAPIC, &xapic);
+ vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
- if (!vcpu->is_x2apic)
+ if (!x->is_x2apic) {
val &= (-1u | (0xffull << (32 + 24)));
- ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
+ ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
+ } else {
+ ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
+ }
}
-static void __test_icr(struct kvm_vm *vm, struct kvm_vcpu *vcpu, uint64_t val)
+#define X2APIC_RSVED_BITS_MASK (GENMASK_ULL(31,20) | \
+ GENMASK_ULL(17,16) | \
+ GENMASK_ULL(13,13))
+
+static void __test_icr(struct xapic_vcpu *x, uint64_t val)
{
- ____test_icr(vm, vcpu, val | APIC_ICR_BUSY);
- ____test_icr(vm, vcpu, val & ~(u64)APIC_ICR_BUSY);
+ if (x->is_x2apic) {
+	/* Hardware requires reserved bits 31:20, 17:16 and 13 of vICR to be
+	 * written as zero to avoid a #GP exception, so mask those bits out
+	 * of any value written to vICR.
+ */
+ val &= ~X2APIC_RSVED_BITS_MASK;
+ }
+ ____test_icr(x, val | APIC_ICR_BUSY);
+ ____test_icr(x, val & ~(u64)APIC_ICR_BUSY);
}
-static void test_icr(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
+static void test_icr(struct xapic_vcpu *x)
{
+ struct kvm_vcpu *vcpu = x->vcpu;
uint64_t icr, i, j;
icr = APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++)
- __test_icr(vm, vcpu, icr | i);
+ __test_icr(x, icr | i);
icr = APIC_INT_ASSERT | APIC_DM_FIXED;
for (i = 0; i <= 0xff; i++)
- __test_icr(vm, vcpu, icr | i);
+ __test_icr(x, icr | i);
/*
* Send all flavors of IPIs to non-existent vCPUs. TODO: use number of
@@ -100,32 +116,30 @@ static void test_icr(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
icr = APIC_INT_ASSERT | 0xff;
for (i = vcpu->id + 1; i < 0xff; i++) {
for (j = 0; j < 8; j++)
- __test_icr(vm, vcpu, i << (32 + 24) | APIC_INT_ASSERT | (j << 8));
+ __test_icr(x, i << (32 + 24) | icr | (j << 8));
}
/* And again with a shorthand destination for all types of IPIs. */
icr = APIC_DEST_ALLBUT | APIC_INT_ASSERT;
for (i = 0; i < 8; i++)
- __test_icr(vm, vcpu, icr | (i << 8));
+ __test_icr(x, icr | (i << 8));
/* And a few garbage values, just to make sure it's an IRQ (blocked). */
- __test_icr(vm, vcpu, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK);
- __test_icr(vm, vcpu, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK);
- __test_icr(vm, vcpu, -1ull & ~APIC_DM_FIXED_MASK);
+ __test_icr(x, 0xa5a5a5a5a5a5a5a5 & ~APIC_DM_FIXED_MASK);
+ __test_icr(x, 0x5a5a5a5a5a5a5a5a & ~APIC_DM_FIXED_MASK);
+ __test_icr(x, -1ull & ~APIC_DM_FIXED_MASK);
}
int main(int argc, char *argv[])
{
- struct kvm_vcpu vcpu = {
- .id = 0,
+ struct xapic_vcpu x = {
+ .vcpu = NULL,
.is_x2apic = true,
};
- struct kvm_cpuid2 *cpuid;
struct kvm_vm *vm;
- int i;
- vm = vm_create_default(vcpu.id, 0, x2apic_guest_code);
- test_icr(vm, &vcpu);
+ vm = vm_create_with_one_vcpu(&x.vcpu, x2apic_guest_code);
+ test_icr(&x);
kvm_vm_free(vm);
/*
@@ -133,18 +147,12 @@ int main(int argc, char *argv[])
* the guest in order to test AVIC. KVM disallows changing CPUID after
* KVM_RUN and AVIC is disabled if _any_ vCPU is allowed to use x2APIC.
*/
- vm = vm_create_default(vcpu.id, 0, xapic_guest_code);
- vcpu.is_x2apic = false;
+ vm = vm_create_with_one_vcpu(&x.vcpu, xapic_guest_code);
+ x.is_x2apic = false;
- cpuid = vcpu_get_cpuid(vm, vcpu.id);
- for (i = 0; i < cpuid->nent; i++) {
- if (cpuid->entries[i].function == 1)
- break;
- }
- cpuid->entries[i].ecx &= ~BIT(21);
- vcpu_set_cpuid(vm, vcpu.id, cpuid);
+ vcpu_clear_cpuid_feature(x.vcpu, X86_FEATURE_X2APIC);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
- test_icr(vm, &vcpu);
+ test_icr(&x);
kvm_vm_free(vm);
}
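
GENMASK_ULL(h, l) produces a 64-bit value with bits h through l set, so the new X2APIC_RSVED_BITS_MASK covers exactly bits 31:20, 17:16 and 13. A self-contained sketch of the same construction, assuming nothing beyond standard C:

	#include <stdio.h>

	/* Equivalent construction to the kernel's GENMASK_ULL(h, l). */
	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	#define X2APIC_RSVED_BITS_MASK	(GENMASK_ULL(31, 20) | \
					 GENMASK_ULL(17, 16) | \
					 GENMASK_ULL(13, 13))

	int main(void)
	{
		/* Prints 0xfff32000: bits 31:20, 17:16 and 13. */
		printf("%#llx\n", X2APIC_RSVED_BITS_MASK);
		return 0;
	}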
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index bcd370827859..8a5cb800f50e 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -18,8 +18,6 @@
#include <sys/eventfd.h>
-#define VCPU_ID 5
-
#define SHINFO_REGION_GVA 0xc0000000ULL
#define SHINFO_REGION_GPA 0xc0000000ULL
#define SHINFO_REGION_SLOT 10
@@ -38,12 +36,34 @@
#define EVTCHN_VECTOR 0x10
-static struct kvm_vm *vm;
+#define EVTCHN_TEST1 15
+#define EVTCHN_TEST2 66
+#define EVTCHN_TIMER 13
#define XEN_HYPERCALL_MSR 0x40000000
#define MIN_STEAL_TIME 50000
+#define __HYPERVISOR_set_timer_op 15
+#define __HYPERVISOR_sched_op 29
+#define __HYPERVISOR_event_channel_op 32
+
+#define SCHEDOP_poll 3
+
+#define EVTCHNOP_send 4
+
+#define EVTCHNSTAT_interdomain 2
+
+struct evtchn_send {
+ u32 port;
+};
+
+struct sched_poll {
+ u32 *ports;
+ unsigned int nr_ports;
+ u64 timeout;
+};
+
struct pvclock_vcpu_time_info {
u32 version;
u32 pad0;
@@ -106,15 +126,25 @@ struct {
struct kvm_irq_routing_entry entries[2];
} irq_routes;
+bool guest_saw_irq;
+
static void evtchn_handler(struct ex_regs *regs)
{
struct vcpu_info *vi = (void *)VCPU_INFO_VADDR;
vi->evtchn_upcall_pending = 0;
vi->evtchn_pending_sel = 0;
+ guest_saw_irq = true;
GUEST_SYNC(0x20);
}
+static void guest_wait_for_irq(void)
+{
+ while (!guest_saw_irq)
+ __asm__ __volatile__ ("rep nop" : : : "memory");
+ guest_saw_irq = false;
+}
+
static void guest_code(void)
{
struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
@@ -127,6 +157,8 @@ static void guest_code(void)
/* Trigger an interrupt injection */
GUEST_SYNC(0);
+ guest_wait_for_irq();
+
/* Test having the host set runstates manually */
GUEST_SYNC(RUNSTATE_runnable);
GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0);
@@ -167,14 +199,132 @@ static void guest_code(void)
/* Now deliver an *unmasked* interrupt */
GUEST_SYNC(8);
- while (!si->evtchn_pending[1])
- __asm__ __volatile__ ("rep nop" : : : "memory");
+ guest_wait_for_irq();
/* Change memslots and deliver an interrupt */
GUEST_SYNC(9);
- for (;;)
- __asm__ __volatile__ ("rep nop" : : : "memory");
+ guest_wait_for_irq();
+
+ /* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND */
+ GUEST_SYNC(10);
+
+ guest_wait_for_irq();
+
+ GUEST_SYNC(11);
+
+ /* Our turn. Deliver event channel (to ourselves) with
+ * EVTCHNOP_send hypercall. */
+ unsigned long rax;
+ struct evtchn_send s = { .port = 127 };
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_event_channel_op),
+ "D" (EVTCHNOP_send),
+ "S" (&s));
+
+ GUEST_ASSERT(rax == 0);
+
+ guest_wait_for_irq();
+
+ GUEST_SYNC(12);
+
+ /* Deliver "outbound" event channel to an eventfd which
+ * happens to be one of our own irqfds. */
+ s.port = 197;
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_event_channel_op),
+ "D" (EVTCHNOP_send),
+ "S" (&s));
+
+ GUEST_ASSERT(rax == 0);
+
+ guest_wait_for_irq();
+
+ GUEST_SYNC(13);
+
+ /* Set a timer 100ms in the future. */
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_set_timer_op),
+ "D" (rs->state_entry_time + 100000000));
+ GUEST_ASSERT(rax == 0);
+
+ GUEST_SYNC(14);
+
+ /* Now wait for the timer */
+ guest_wait_for_irq();
+
+ GUEST_SYNC(15);
+
+ /* The host has 'restored' the timer. Just wait for it. */
+ guest_wait_for_irq();
+
+ GUEST_SYNC(16);
+
+ /* Poll for an event channel port which is already set */
+ u32 ports[1] = { EVTCHN_TIMER };
+ struct sched_poll p = {
+ .ports = ports,
+ .nr_ports = 1,
+ .timeout = 0,
+ };
+
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_sched_op),
+ "D" (SCHEDOP_poll),
+ "S" (&p));
+
+ GUEST_ASSERT(rax == 0);
+
+ GUEST_SYNC(17);
+
+ /* Poll for an unset port and wait for the timeout. */
+ p.timeout = 100000000;
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_sched_op),
+ "D" (SCHEDOP_poll),
+ "S" (&p));
+
+ GUEST_ASSERT(rax == 0);
+
+ GUEST_SYNC(18);
+
+ /* A timer will wake the masked port we're waiting on, while we poll */
+ p.timeout = 0;
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_sched_op),
+ "D" (SCHEDOP_poll),
+ "S" (&p));
+
+ GUEST_ASSERT(rax == 0);
+
+ GUEST_SYNC(19);
+
+	/* A timer will wake an *unmasked* port, which should wake us with an
+	 * actual interrupt while we're polling on a different port. */
+ ports[0]++;
+ p.timeout = 0;
+ __asm__ __volatile__ ("vmcall" :
+ "=a" (rax) :
+ "a" (__HYPERVISOR_sched_op),
+ "D" (SCHEDOP_poll),
+ "S" (&p));
+
+ GUEST_ASSERT(rax == 0);
+
+ guest_wait_for_irq();
+
+ GUEST_SYNC(20);
+
+ /* Timer should have fired already */
+ guest_wait_for_irq();
+
+ GUEST_SYNC(21);
}
static int cmp_timespec(struct timespec *a, struct timespec *b)
@@ -191,32 +341,36 @@ static int cmp_timespec(struct timespec *a, struct timespec *b)
return 0;
}
+static struct vcpu_info *vinfo;
+static struct kvm_vcpu *vcpu;
+
static void handle_alrm(int sig)
{
+ if (vinfo)
+ printf("evtchn_upcall_pending 0x%x\n", vinfo->evtchn_upcall_pending);
+ vcpu_dump(stdout, vcpu, 0);
TEST_FAIL("IRQ delivery timed out");
}
int main(int argc, char *argv[])
{
struct timespec min_ts, max_ts, vm_ts;
+ struct kvm_vm *vm;
bool verbose;
verbose = argc > 1 && (!strncmp(argv[1], "-v", 3) ||
!strncmp(argv[1], "--verbose", 10));
int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
- if (!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO) ) {
- print_skip("KVM_XEN_HVM_CONFIG_SHARED_INFO not available");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO);
bool do_runstate_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_RUNSTATE);
bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
+ bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
clock_gettime(CLOCK_REALTIME, &min_ts);
- vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
- vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
/* Map a region for the shared_info page */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
@@ -232,6 +386,12 @@ int main(int argc, char *argv[])
.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
.msr = XEN_HYPERCALL_MSR,
};
+
+ /* Let the kernel know that we *will* use it for sending all
+ * event channels, which lets it intercept SCHEDOP_poll */
+ if (do_evtchn_tests)
+ hvmc.flags |= KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
+
vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
struct kvm_xen_hvm_attr lm = {
@@ -260,13 +420,13 @@ int main(int argc, char *argv[])
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO,
.u.gpa = VCPU_INFO_ADDR,
};
- vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &vi);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vi);
struct kvm_xen_vcpu_attr pvclock = {
.type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
.u.gpa = PVTIME_ADDR,
};
- vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &pvclock);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &pvclock);
struct kvm_xen_hvm_attr vec = {
.type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
@@ -275,7 +435,7 @@ int main(int argc, char *argv[])
vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &vec);
vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vcpu_init_descriptor_tables(vcpu);
vm_install_exception_handler(vm, EVTCHN_VECTOR, evtchn_handler);
if (do_runstate_tests) {
@@ -283,7 +443,7 @@ int main(int argc, char *argv[])
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
.u.gpa = RUNSTATE_ADDR,
};
- vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &st);
}
int irq_fd[2] = { -1, -1 };
@@ -294,7 +454,7 @@ int main(int argc, char *argv[])
/* Unexpected, but not a KVM failure */
if (irq_fd[0] == -1 || irq_fd[1] == -1)
- do_eventfd_tests = false;
+ do_evtchn_tests = do_eventfd_tests = false;
}
if (do_eventfd_tests) {
@@ -302,17 +462,17 @@ int main(int argc, char *argv[])
irq_routes.entries[0].gsi = 32;
irq_routes.entries[0].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
- irq_routes.entries[0].u.xen_evtchn.port = 15;
- irq_routes.entries[0].u.xen_evtchn.vcpu = VCPU_ID;
+ irq_routes.entries[0].u.xen_evtchn.port = EVTCHN_TEST1;
+ irq_routes.entries[0].u.xen_evtchn.vcpu = vcpu->id;
irq_routes.entries[0].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
irq_routes.entries[1].gsi = 33;
irq_routes.entries[1].type = KVM_IRQ_ROUTING_XEN_EVTCHN;
- irq_routes.entries[1].u.xen_evtchn.port = 66;
- irq_routes.entries[1].u.xen_evtchn.vcpu = VCPU_ID;
+ irq_routes.entries[1].u.xen_evtchn.port = EVTCHN_TEST2;
+ irq_routes.entries[1].u.xen_evtchn.vcpu = vcpu->id;
irq_routes.entries[1].u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
- vm_ioctl(vm, KVM_SET_GSI_ROUTING, &irq_routes);
+ vm_ioctl(vm, KVM_SET_GSI_ROUTING, &irq_routes.info);
struct kvm_irqfd ifd = { };
@@ -329,7 +489,39 @@ int main(int argc, char *argv[])
sigaction(SIGALRM, &sa, NULL);
}
- struct vcpu_info *vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
+ struct kvm_xen_vcpu_attr tmr = {
+ .type = KVM_XEN_VCPU_ATTR_TYPE_TIMER,
+ .u.timer.port = EVTCHN_TIMER,
+ .u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
+ .u.timer.expires_ns = 0
+ };
+
+ if (do_evtchn_tests) {
+ struct kvm_xen_hvm_attr inj = {
+ .type = KVM_XEN_ATTR_TYPE_EVTCHN,
+ .u.evtchn.send_port = 127,
+ .u.evtchn.type = EVTCHNSTAT_interdomain,
+ .u.evtchn.flags = 0,
+ .u.evtchn.deliver.port.port = EVTCHN_TEST1,
+ .u.evtchn.deliver.port.vcpu = vcpu->id + 1,
+ .u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
+ };
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
+
+ /* Test migration to a different vCPU */
+ inj.u.evtchn.flags = KVM_XEN_EVTCHN_UPDATE;
+ inj.u.evtchn.deliver.port.vcpu = vcpu->id;
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
+
+ inj.u.evtchn.send_port = 197;
+ inj.u.evtchn.deliver.eventfd.port = 0;
+ inj.u.evtchn.deliver.eventfd.fd = irq_fd[1];
+ inj.u.evtchn.flags = 0;
+ vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &inj);
+
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ }
+ vinfo = addr_gpa2hva(vm, VCPU_INFO_VADDR);
vinfo->evtchn_upcall_pending = 0;
struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);
@@ -338,19 +530,19 @@ int main(int argc, char *argv[])
bool evtchn_irq_expected = false;
for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC: {
struct kvm_xen_vcpu_attr rst;
@@ -377,7 +569,7 @@ int main(int argc, char *argv[])
printf("Testing runstate %s\n", runstate_names[uc.args[1]]);
rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT;
rst.u.runstate.state = uc.args[1];
- vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 4:
@@ -392,7 +584,7 @@ int main(int argc, char *argv[])
0x6b6b - rs->time[RUNSTATE_offline];
rst.u.runstate.time_runnable = -rst.u.runstate.time_blocked -
rst.u.runstate.time_offline;
- vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 5:
@@ -404,7 +596,7 @@ int main(int argc, char *argv[])
rst.u.runstate.state_entry_time = 0x6b6b + 0x5a;
rst.u.runstate.time_blocked = 0x6b6b;
rst.u.runstate.time_offline = 0x5a;
- vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst);
break;
case 6:
@@ -422,7 +614,7 @@ int main(int argc, char *argv[])
goto done;
if (verbose)
printf("Testing masked event channel\n");
- shinfo->evtchn_mask[0] = 0x8000;
+ shinfo->evtchn_mask[0] = 1UL << EVTCHN_TEST1;
eventfd_write(irq_fd[0], 1UL);
alarm(1);
break;
@@ -439,6 +631,9 @@ int main(int argc, char *argv[])
break;
case 9:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ shinfo->evtchn_pending[1] = 0;
if (verbose)
printf("Testing event channel after memslot change\n");
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
@@ -448,12 +643,153 @@ int main(int argc, char *argv[])
alarm(1);
break;
+ case 10:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ if (!do_evtchn_tests)
+ goto done;
+
+ shinfo->evtchn_pending[0] = 0;
+ if (verbose)
+ printf("Testing injection with KVM_XEN_HVM_EVTCHN_SEND\n");
+
+ struct kvm_irq_routing_xen_evtchn e;
+ e.port = EVTCHN_TEST2;
+ e.vcpu = vcpu->id;
+ e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
+
+ vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &e);
+ evtchn_irq_expected = true;
+ alarm(1);
+ break;
+
+ case 11:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ shinfo->evtchn_pending[1] = 0;
+
+ if (verbose)
+ printf("Testing guest EVTCHNOP_send direct to evtchn\n");
+ evtchn_irq_expected = true;
+ alarm(1);
+ break;
+
+ case 12:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ shinfo->evtchn_pending[0] = 0;
+
+ if (verbose)
+ printf("Testing guest EVTCHNOP_send to eventfd\n");
+ evtchn_irq_expected = true;
+ alarm(1);
+ break;
+
+ case 13:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ shinfo->evtchn_pending[1] = 0;
+
+ if (verbose)
+ printf("Testing guest oneshot timer\n");
+ break;
+
+ case 14:
+ memset(&tmr, 0, sizeof(tmr));
+ tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
+ TEST_ASSERT(tmr.u.timer.port == EVTCHN_TIMER,
+ "Timer port not returned");
+ TEST_ASSERT(tmr.u.timer.priority == KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
+ "Timer priority not returned");
+ TEST_ASSERT(tmr.u.timer.expires_ns > rs->state_entry_time,
+ "Timer expiry not returned");
+ evtchn_irq_expected = true;
+ alarm(1);
+ break;
+
+ case 15:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ shinfo->evtchn_pending[0] = 0;
+
+ if (verbose)
+ printf("Testing restored oneshot timer\n");
+
+ tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ evtchn_irq_expected = true;
+ alarm(1);
+ break;
+
+ case 16:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+
+ if (verbose)
+ printf("Testing SCHEDOP_poll with already pending event\n");
+ shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 1UL << EVTCHN_TIMER;
+ alarm(1);
+ break;
+
+ case 17:
+ if (verbose)
+ printf("Testing SCHEDOP_poll timeout\n");
+ shinfo->evtchn_pending[0] = 0;
+ alarm(1);
+ break;
+
+ case 18:
+ if (verbose)
+ printf("Testing SCHEDOP_poll wake on masked event\n");
+
+ tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ alarm(1);
+ break;
+
+ case 19:
+ shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 0;
+ if (verbose)
+ printf("Testing SCHEDOP_poll wake on unmasked event\n");
+
+ evtchn_irq_expected = true;
+ tmr.u.timer.expires_ns = rs->state_entry_time + 100000000;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+
+ /* Read it back and check the pending time is reported correctly */
+ tmr.u.timer.expires_ns = 0;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
+ TEST_ASSERT(tmr.u.timer.expires_ns == rs->state_entry_time + 100000000,
+ "Timer not reported pending");
+ alarm(1);
+ break;
+
+ case 20:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ /* Read timer and check it is no longer pending */
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr);
+ TEST_ASSERT(!tmr.u.timer.expires_ns, "Timer still reported pending");
+
+ shinfo->evtchn_pending[0] = 0;
+ if (verbose)
+ printf("Testing timer in the past\n");
+
+ evtchn_irq_expected = true;
+ tmr.u.timer.expires_ns = rs->state_entry_time - 100000000ULL;
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
+ alarm(1);
+ break;
+
+ case 21:
+ TEST_ASSERT(!evtchn_irq_expected,
+ "Expected event channel IRQ but it didn't happen");
+ goto done;
+
case 0x20:
TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
evtchn_irq_expected = false;
- if (shinfo->evtchn_pending[1] &&
- shinfo->evtchn_pending[0])
- goto done;
break;
}
break;
@@ -466,6 +802,7 @@ int main(int argc, char *argv[])
}
done:
+ alarm(0);
clock_gettime(CLOCK_REALTIME, &max_ts);
/*
@@ -511,7 +848,7 @@ int main(int argc, char *argv[])
struct kvm_xen_vcpu_attr rst = {
.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA,
};
- vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_GET_ATTR, &rst);
+ vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &rst);
if (verbose) {
printf("Runstate: %s(%d), entry %" PRIu64 " ns\n",
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index b30fe9de1d4f..88914d48c65e 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -11,13 +11,9 @@
#include "kvm_util.h"
#include "processor.h"
-#define VCPU_ID 5
-
#define HCALL_REGION_GPA 0xc0000000ULL
#define HCALL_REGION_SLOT 10
-static struct kvm_vm *vm;
-
#define INPUTVALUE 17
#define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x)
#define RETVALUE 0xcafef00dfbfbffffUL
@@ -84,14 +80,15 @@ static void guest_code(void)
int main(int argc, char *argv[])
{
- if (!(kvm_check_cap(KVM_CAP_XEN_HVM) &
- KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) {
- print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available");
- exit(KSFT_SKIP);
- }
+ unsigned int xen_caps;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
+ TEST_REQUIRE(xen_caps & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL);
- vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
- vcpu_set_hv_cpuid(vm, VCPU_ID);
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vcpu_set_hv_cpuid(vcpu);
struct kvm_xen_hvm_config hvmc = {
.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
@@ -105,10 +102,10 @@ int main(int argc, char *argv[])
virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);
for (;;) {
- volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ volatile struct kvm_run *run = vcpu->run;
struct ucall uc;
- vcpu_run(vm, VCPU_ID);
+ vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_XEN) {
ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
@@ -130,9 +127,9 @@ int main(int argc, char *argv[])
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (get_ucall(vm, VCPU_ID, &uc)) {
+ switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- TEST_FAIL("%s", (const char *)uc.args[0]);
+ REPORT_GUEST_ASSERT(uc);
/* NOT REACHED */
case UCALL_SYNC:
break;
diff --git a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
index 3529376747c2..e0ddf47362e7 100644
--- a/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xss_msr_test.c
@@ -12,64 +12,44 @@
#include "kvm_util.h"
#include "vmx.h"
-#define VCPU_ID 1
#define MSR_BITS 64
-#define X86_FEATURE_XSAVES (1<<3)
-
-bool is_supported_msr(u32 msr_index)
-{
- struct kvm_msr_list *list;
- bool found = false;
- int i;
-
- list = kvm_get_msr_index_list();
- for (i = 0; i < list->nmsrs; ++i) {
- if (list->indices[i] == msr_index) {
- found = true;
- break;
- }
- }
-
- free(list);
- return found;
-}
-
int main(int argc, char *argv[])
{
- struct kvm_cpuid_entry2 *entry;
- bool xss_supported = false;
+ bool xss_in_msr_list;
struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
uint64_t xss_val;
int i, r;
/* Create VM */
- vm = vm_create_default(VCPU_ID, 0, 0);
+ vm = vm_create_with_one_vcpu(&vcpu, NULL);
- if (kvm_get_cpuid_max_basic() >= 0xd) {
- entry = kvm_get_supported_cpuid_index(0xd, 1);
- xss_supported = entry && !!(entry->eax & X86_FEATURE_XSAVES);
- }
- if (!xss_supported) {
- print_skip("IA32_XSS is not supported by the vCPU");
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVES));
- xss_val = vcpu_get_msr(vm, VCPU_ID, MSR_IA32_XSS);
+ xss_val = vcpu_get_msr(vcpu, MSR_IA32_XSS);
TEST_ASSERT(xss_val == 0,
"MSR_IA32_XSS should be initialized to zero\n");
- vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, xss_val);
+ vcpu_set_msr(vcpu, MSR_IA32_XSS, xss_val);
+
/*
* At present, KVM only supports a guest IA32_XSS value of 0. Verify
* that trying to set the guest IA32_XSS to an unsupported value fails.
* Also, in the future when a non-zero value succeeds check that
- * IA32_XSS is in the KVM_GET_MSR_INDEX_LIST.
+ * IA32_XSS is in the list of MSRs to save/restore.
*/
+ xss_in_msr_list = kvm_msr_is_in_save_restore_list(MSR_IA32_XSS);
for (i = 0; i < MSR_BITS; ++i) {
- r = _vcpu_set_msr(vm, VCPU_ID, MSR_IA32_XSS, 1ull << i);
- TEST_ASSERT(r == 0 || is_supported_msr(MSR_IA32_XSS),
- "IA32_XSS was able to be set, but was not found in KVM_GET_MSR_INDEX_LIST.\n");
+ r = _vcpu_set_msr(vcpu, MSR_IA32_XSS, 1ull << i);
+
+ /*
+ * Setting a list of MSRs returns the entry that "faulted", or
+ * the last entry +1 if all MSRs were successfully written.
+ */
+ TEST_ASSERT(!r || r == 1, KVM_IOCTL_ERROR(KVM_SET_MSRS, r));
+ TEST_ASSERT(r != 1 || xss_in_msr_list,
+ "IA32_XSS was able to be set, but was not in save/restore list");
}
kvm_vm_free(vm);
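
The rewritten loop leans on KVM_SET_MSRS semantics: the ioctl returns the number of MSRs written before the first failure, so with a single-entry list 0 means the write was rejected and 1 means it was accepted. A sketch of that decode, assuming _vcpu_set_msr() passes the ioctl return value through:

	int r = _vcpu_set_msr(vcpu, MSR_IA32_XSS, 1ull << i);

	/* One MSR in the list: 0 == rejected, 1 == written. */
	TEST_ASSERT(r == 0 || r == 1, "KVM_SET_MSRS returned %d", r);

	/* Any MSR KVM lets userspace write must also be migratable. */
	if (r == 1)
		TEST_ASSERT(kvm_msr_is_in_save_restore_list(MSR_IA32_XSS),
			    "Writable MSR missing from save/restore list");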
diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile
index 0b0049e133bb..a6959df28eb0 100644
--- a/tools/testing/selftests/landlock/Makefile
+++ b/tools/testing/selftests/landlock/Makefile
@@ -8,17 +8,11 @@ TEST_GEN_PROGS := $(src_test:.c=)
TEST_GEN_PROGS_EXTENDED := true
-KSFT_KHDR_INSTALL := 1
OVERRIDE_TARGETS := 1
include ../lib.mk
-khdr_dir = $(top_srcdir)/usr/include
-
-$(khdr_dir)/linux/landlock.h: khdr
- @:
-
$(OUTPUT)/true: true.c
$(LINK.c) $< $(LDLIBS) -o $@ -static
-$(OUTPUT)/%_test: %_test.c $(khdr_dir)/linux/landlock.h ../kselftest_harness.h common.h
- $(LINK.c) $< $(LDLIBS) -o $@ -lcap -I$(khdr_dir)
+$(OUTPUT)/%_test: %_test.c ../kselftest_harness.h common.h
+ $(LINK.c) $< $(LDLIBS) -o $@ -lcap
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 2a2d240cdc1b..947fc72413e9 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -7,10 +7,31 @@ else ifneq ($(filter -%,$(LLVM)),)
LLVM_SUFFIX := $(LLVM)
endif
-CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
+CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
+CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
+CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu
+CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu
+CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
+CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
+CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
+CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
+CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
+
+ifeq ($(CROSS_COMPILE),)
+ifeq ($(CLANG_TARGET_FLAGS),)
+$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk)
+else
+CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS)
+endif # CLANG_TARGET_FLAGS
+else
+CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif # CROSS_COMPILE
+
+CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
else
CC := $(CROSS_COMPILE)gcc
-endif
+endif # LLVM
ifeq (0,$(MAKELEVEL))
ifeq ($(OUTPUT),)
@@ -30,45 +51,7 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
-ifdef KSFT_KHDR_INSTALL
-top_srcdir ?= ../../../..
-include $(top_srcdir)/scripts/subarch.include
-ARCH ?= $(SUBARCH)
-
-# set default goal to all, so make without a target runs all, even when
-# all isn't the first target in the file.
-.DEFAULT_GOAL := all
-
-# Invoke headers install with --no-builtin-rules to avoid circular
-# dependency in "make kselftest" case. In this case, second level
-# make inherits builtin-rules which will use the rule generate
-# Makefile.o and runs into
-# "Circular Makefile.o <- prepare dependency dropped."
-# and headers_install fails and test compile fails.
-# O= KBUILD_OUTPUT cases don't run into this error, since main Makefile
-# invokes them as sub-makes and --no-builtin-rules is not necessary,
-# but doesn't cause any failures. Keep it simple and use the same
-# flags in both cases.
-# Note that the support to install headers from lib.mk is necessary
-# when test Makefile is run directly with "make -C".
-# When local build is done, headers are installed in the default
-# INSTALL_HDR_PATH usr/include.
-.PHONY: khdr
-.NOTPARALLEL:
-khdr:
-ifndef KSFT_KHDR_INSTALL_DONE
-ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
- $(MAKE) --no-builtin-rules ARCH=$(ARCH) -C $(top_srcdir) headers_install
-else
- $(MAKE) --no-builtin-rules INSTALL_HDR_PATH=$$OUTPUT/usr \
- ARCH=$(ARCH) -C $(top_srcdir) headers_install
-endif
-endif
-
-all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
-else
all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
-endif
define RUN_TESTS
BASE_DIR="$(selfdir)"; \
diff --git a/tools/testing/selftests/lkdtm/config b/tools/testing/selftests/lkdtm/config
index 46f39ee76208..5d52f64dfb43 100644
--- a/tools/testing/selftests/lkdtm/config
+++ b/tools/testing/selftests/lkdtm/config
@@ -2,10 +2,14 @@ CONFIG_LKDTM=y
CONFIG_DEBUG_LIST=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_FORTIFY_SOURCE=y
+CONFIG_GCC_PLUGIN_STACKLEAK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+CONFIG_INIT_ON_FREE_DEFAULT_ON=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_UBSAN=y
CONFIG_UBSAN_BOUNDS=y
CONFIG_UBSAN_TRAP=y
CONFIG_STACKPROTECTOR_STRONG=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_SLUB_DEBUG_ON=y
diff --git a/tools/testing/selftests/lkdtm/tests.txt b/tools/testing/selftests/lkdtm/tests.txt
index 243c781f0780..65e53eb0840b 100644
--- a/tools/testing/selftests/lkdtm/tests.txt
+++ b/tools/testing/selftests/lkdtm/tests.txt
@@ -64,16 +64,17 @@ REFCOUNT_DEC_AND_TEST_SATURATED Saturation detected: still saturated
REFCOUNT_SUB_AND_TEST_SATURATED Saturation detected: still saturated
#REFCOUNT_TIMING timing only
#ATOMIC_TIMING timing only
-USERCOPY_HEAP_SIZE_TO
-USERCOPY_HEAP_SIZE_FROM
-USERCOPY_HEAP_WHITELIST_TO
-USERCOPY_HEAP_WHITELIST_FROM
+USERCOPY_SLAB_SIZE_TO
+USERCOPY_SLAB_SIZE_FROM
+USERCOPY_SLAB_WHITELIST_TO
+USERCOPY_SLAB_WHITELIST_FROM
USERCOPY_STACK_FRAME_TO
USERCOPY_STACK_FRAME_FROM
USERCOPY_STACK_BEYOND
USERCOPY_KERNEL
STACKLEAK_ERASING OK: the rest of the thread stack is properly erased
CFI_FORWARD_PROTO
+CFI_BACKWARD call trace:|ok: control flow unchanged
FORTIFIED_STRSCPY
FORTIFIED_OBJECT
FORTIFIED_SUBOBJECT
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index 84fda3b49073..5c16159d0bcd 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -35,6 +35,7 @@
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/stat.h>
+#include <sys/param.h>
#include <mqueue.h>
#include <popt.h>
#include <error.h>
@@ -73,7 +74,6 @@ static char *usage =
char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
-#define min(a, b) ((a) < (b) ? (a) : (b))
#define MAX_CPUS 64
char *cpu_option_string;
int cpus_to_pin[MAX_CPUS];
@@ -560,7 +560,7 @@ int main(int argc, char *argv[])
"require root in order to modify\nsystem settings. "
"Exiting.\n");
- cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
+ cpus_online = MIN(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
cpu_set = CPU_ALLOC(cpus_online);
if (cpu_set == NULL) {
perror("CPU_ALLOC()");
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 21a411b04890..892306bdb47d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -5,6 +5,7 @@ socket
psock_fanout
psock_snd
psock_tpacket
+stress_reuseport_listen
reuseport_addr_any
reuseport_bpf
reuseport_bpf_cpu
@@ -35,4 +36,6 @@ test_unix_oob
gro
ioam6_parser
toeplitz
+tun
cmsg_sender
+unix_connect
\ No newline at end of file
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index e1f998defd10..e2dfef8b78a7 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -11,7 +11,7 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
-TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh
+TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh fib_nexthop_nongw.sh
TEST_PROGS += altnames.sh icmp.sh icmp_redirect.sh ip6_gre_headroom.sh
TEST_PROGS += route_localnet.sh
TEST_PROGS += reuseaddr_ports_exhausted.sh
@@ -35,8 +35,13 @@ TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh
TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
+TEST_PROGS += srv6_hencap_red_l3vpn_test.sh
+TEST_PROGS += srv6_hl2encap_red_l2vpn_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
TEST_PROGS += arp_ndisc_evict_nocarrier.sh
+TEST_PROGS += ndisc_unsolicited_na_test.sh
+TEST_PROGS += arp_ndisc_untracked_subnets.sh
+TEST_PROGS += stress_reuseport_listen.sh
TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
@@ -52,14 +57,15 @@ TEST_GEN_FILES += ipsec
TEST_GEN_FILES += ioam6_parser
TEST_GEN_FILES += gro
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
-TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
+TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun
TEST_GEN_FILES += toeplitz
TEST_GEN_FILES += cmsg_sender
+TEST_GEN_FILES += stress_reuseport_listen
TEST_PROGS += test_vxlan_vnifiltering.sh
+TEST_GEN_FILES += io_uring_zerocopy_tx
TEST_FILES := settings
-KSFT_KHDR_INSTALL := 1
include ../lib.mk
include bpf/Makefile
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index df341648f818..969620ae9928 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,2 +1,3 @@
-TEST_GEN_PROGS := test_unix_oob
+TEST_GEN_PROGS := test_unix_oob unix_connect
+
include ../../lib.mk
diff --git a/tools/testing/selftests/net/af_unix/unix_connect.c b/tools/testing/selftests/net/af_unix/unix_connect.c
new file mode 100644
index 000000000000..d799fd8f5c7c
--- /dev/null
+++ b/tools/testing/selftests/net/af_unix/unix_connect.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <stddef.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#include "../../kselftest_harness.h"
+
+FIXTURE(unix_connect)
+{
+ int server, client;
+ int family;
+};
+
+FIXTURE_VARIANT(unix_connect)
+{
+ int type;
+ char sun_path[8];
+ int len;
+ int flags;
+ int err;
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, stream_pathname)
+{
+ .type = SOCK_STREAM,
+ .sun_path = "test",
+ .len = 4 + 1,
+ .flags = 0,
+ .err = 0,
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, stream_abstract)
+{
+ .type = SOCK_STREAM,
+ .sun_path = "\0test",
+ .len = 5,
+ .flags = 0,
+ .err = 0,
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, stream_pathname_netns)
+{
+ .type = SOCK_STREAM,
+ .sun_path = "test",
+ .len = 4 + 1,
+ .flags = CLONE_NEWNET,
+ .err = 0,
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, stream_abstract_netns)
+{
+ .type = SOCK_STREAM,
+ .sun_path = "\0test",
+ .len = 5,
+ .flags = CLONE_NEWNET,
+ .err = ECONNREFUSED,
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, dgram_pathname)
+{
+ .type = SOCK_DGRAM,
+ .sun_path = "test",
+ .len = 4 + 1,
+ .flags = 0,
+ .err = 0,
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, dgram_abstract)
+{
+ .type = SOCK_DGRAM,
+ .sun_path = "\0test",
+ .len = 5,
+ .flags = 0,
+ .err = 0,
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, dgram_pathname_netns)
+{
+ .type = SOCK_DGRAM,
+ .sun_path = "test",
+ .len = 4 + 1,
+ .flags = CLONE_NEWNET,
+ .err = 0,
+};
+
+FIXTURE_VARIANT_ADD(unix_connect, dgram_abstract_netns)
+{
+ .type = SOCK_DGRAM,
+ .sun_path = "\0test",
+ .len = 5,
+ .flags = CLONE_NEWNET,
+ .err = ECONNREFUSED,
+};
+
+FIXTURE_SETUP(unix_connect)
+{
+ self->family = AF_UNIX;
+}
+
+FIXTURE_TEARDOWN(unix_connect)
+{
+ close(self->server);
+ close(self->client);
+
+ if (variant->sun_path[0])
+ remove("test");
+}
+
+TEST_F(unix_connect, test)
+{
+ socklen_t addrlen;
+ struct sockaddr_un addr = {
+ .sun_family = self->family,
+ };
+ int err;
+
+ self->server = socket(self->family, variant->type, 0);
+ ASSERT_NE(-1, self->server);
+
+ addrlen = offsetof(struct sockaddr_un, sun_path) + variant->len;
+ memcpy(&addr.sun_path, variant->sun_path, variant->len);
+
+ err = bind(self->server, (struct sockaddr *)&addr, addrlen);
+ ASSERT_EQ(0, err);
+
+ if (variant->type == SOCK_STREAM) {
+ err = listen(self->server, 32);
+ ASSERT_EQ(0, err);
+ }
+
+ err = unshare(variant->flags);
+ ASSERT_EQ(0, err);
+
+ self->client = socket(self->family, variant->type, 0);
+ ASSERT_LT(0, self->client);
+
+ err = connect(self->client, (struct sockaddr *)&addr, addrlen);
+ ASSERT_EQ(variant->err, err == -1 ? errno : 0);
+}
+
+TEST_HARNESS_MAIN
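
The fixture's .len values encode the one subtlety of AF_UNIX addressing: a pathname address ("test", len 4 + 1) includes its NUL terminator, while a Linux abstract address ("\0test", len 5) begins with a NUL byte and carries no terminator; the total sockaddr length is always offsetof(struct sockaddr_un, sun_path) plus that name length. A sketch of the bind-side arithmetic under those assumptions:

	#include <stddef.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/un.h>

	/* Fill @addr and return the length to pass to bind()/connect(). */
	static socklen_t unix_addrlen(struct sockaddr_un *addr,
				      const char *name, size_t name_len)
	{
		addr->sun_family = AF_UNIX;
		memcpy(addr->sun_path, name, name_len);

		/* "test" -> 4 + 1 (keep the NUL); "\0test" -> 5 (no NUL) */
		return offsetof(struct sockaddr_un, sun_path) + name_len;
	}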
diff --git a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
new file mode 100755
index 000000000000..c899b446acb6
--- /dev/null
+++ b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
@@ -0,0 +1,308 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Two namespaces: one host and one router. Use arping from the host to send
+# a gratuitous ARP to the router, which accepts or ignores it based on its
+# arp_accept or accept_untracked_na configuration.
+
+TESTS="arp ndisc"
+
+ROUTER_NS="ns-router"
+ROUTER_NS_V6="ns-router-v6"
+ROUTER_INTF="veth-router"
+ROUTER_ADDR="10.0.10.1"
+ROUTER_ADDR_V6="2001:db8:abcd:0012::1"
+
+HOST_NS="ns-host"
+HOST_NS_V6="ns-host-v6"
+HOST_INTF="veth-host"
+HOST_ADDR="10.0.10.2"
+HOST_ADDR_V6="2001:db8:abcd:0012::2"
+
+SUBNET_WIDTH=24
+PREFIX_WIDTH_V6=64
+
+cleanup() {
+ ip netns del ${HOST_NS}
+ ip netns del ${ROUTER_NS}
+}
+
+cleanup_v6() {
+ ip netns del ${HOST_NS_V6}
+ ip netns del ${ROUTER_NS_V6}
+}
+
+setup() {
+ set -e
+ local arp_accept=$1
+
+ # Set up two namespaces
+ ip netns add ${ROUTER_NS}
+ ip netns add ${HOST_NS}
+
+ # Set up interfaces veth0 and veth1, which are pairs in separate
+ # namespaces. veth0 is veth-router, veth1 is veth-host.
+	# first, set up the interface's link to the namespace
+ # then, set the interface "up"
+ ip netns exec ${ROUTER_NS} ip link add name ${ROUTER_INTF} \
+ type veth peer name ${HOST_INTF}
+
+ ip netns exec ${ROUTER_NS} ip link set dev ${ROUTER_INTF} up
+ ip netns exec ${ROUTER_NS} ip link set dev ${HOST_INTF} netns ${HOST_NS}
+
+ ip netns exec ${HOST_NS} ip link set dev ${HOST_INTF} up
+ ip netns exec ${ROUTER_NS} ip addr add ${ROUTER_ADDR}/${SUBNET_WIDTH} \
+ dev ${ROUTER_INTF}
+
+ ip netns exec ${HOST_NS} ip addr add ${HOST_ADDR}/${SUBNET_WIDTH} \
+ dev ${HOST_INTF}
+ ip netns exec ${HOST_NS} ip route add default via ${HOST_ADDR} \
+ dev ${HOST_INTF}
+ ip netns exec ${ROUTER_NS} ip route add default via ${ROUTER_ADDR} \
+ dev ${ROUTER_INTF}
+
+ ROUTER_CONF=net.ipv4.conf.${ROUTER_INTF}
+ ip netns exec ${ROUTER_NS} sysctl -w \
+ ${ROUTER_CONF}.arp_accept=${arp_accept} >/dev/null 2>&1
+ set +e
+}
+
+setup_v6() {
+ set -e
+ local accept_untracked_na=$1
+
+ # Set up two namespaces
+ ip netns add ${ROUTER_NS_V6}
+ ip netns add ${HOST_NS_V6}
+
+ # Set up interfaces veth0 and veth1, which are pairs in separate
+ # namespaces. veth0 is veth-router, veth1 is veth-host.
+	# first, set up the interface's link to the namespace
+ # then, set the interface "up"
+ ip -6 -netns ${ROUTER_NS_V6} link add name ${ROUTER_INTF} \
+ type veth peer name ${HOST_INTF}
+
+ ip -6 -netns ${ROUTER_NS_V6} link set dev ${ROUTER_INTF} up
+ ip -6 -netns ${ROUTER_NS_V6} link set dev ${HOST_INTF} netns \
+ ${HOST_NS_V6}
+
+ ip -6 -netns ${HOST_NS_V6} link set dev ${HOST_INTF} up
+ ip -6 -netns ${ROUTER_NS_V6} addr add \
+ ${ROUTER_ADDR_V6}/${PREFIX_WIDTH_V6} dev ${ROUTER_INTF} nodad
+
+ HOST_CONF=net.ipv6.conf.${HOST_INTF}
+ ip netns exec ${HOST_NS_V6} sysctl -qw ${HOST_CONF}.ndisc_notify=1
+ ip netns exec ${HOST_NS_V6} sysctl -qw ${HOST_CONF}.disable_ipv6=0
+ ip -6 -netns ${HOST_NS_V6} addr add ${HOST_ADDR_V6}/${PREFIX_WIDTH_V6} \
+ dev ${HOST_INTF}
+
+ ROUTER_CONF=net.ipv6.conf.${ROUTER_INTF}
+
+ ip netns exec ${ROUTER_NS_V6} sysctl -w \
+ ${ROUTER_CONF}.forwarding=1 >/dev/null 2>&1
+ ip netns exec ${ROUTER_NS_V6} sysctl -w \
+ ${ROUTER_CONF}.drop_unsolicited_na=0 >/dev/null 2>&1
+ ip netns exec ${ROUTER_NS_V6} sysctl -w \
+ ${ROUTER_CONF}.accept_untracked_na=${accept_untracked_na} \
+ >/dev/null 2>&1
+ set +e
+}
+
+verify_arp() {
+ local arp_accept=$1
+ local same_subnet=$2
+
+ neigh_show_output=$(ip netns exec ${ROUTER_NS} ip neigh get \
+ ${HOST_ADDR} dev ${ROUTER_INTF} 2>/dev/null)
+
+ if [ ${arp_accept} -eq 1 ]; then
+ # Neighbor entries expected
+ [[ ${neigh_show_output} ]]
+ elif [ ${arp_accept} -eq 2 ]; then
+ if [ ${same_subnet} -eq 1 ]; then
+ # Neighbor entries expected
+ [[ ${neigh_show_output} ]]
+ else
+ [[ -z "${neigh_show_output}" ]]
+ fi
+ else
+ [[ -z "${neigh_show_output}" ]]
+ fi
+ }
+
+arp_test_gratuitous() {
+ set -e
+ local arp_accept=$1
+ local same_subnet=$2
+
+ if [ ${arp_accept} -eq 2 ]; then
+ test_msg=("test_arp: "
+ "accept_arp=$1 "
+ "same_subnet=$2")
+ if [ ${same_subnet} -eq 0 ]; then
+ HOST_ADDR=10.0.11.3
+ else
+ HOST_ADDR=10.0.10.3
+ fi
+ else
+ test_msg=("test_arp: "
+ "accept_arp=$1")
+ fi
+ # Supply arp_accept option to set up which sets it in sysctl
+ setup ${arp_accept}
+	ip netns exec ${HOST_NS} arping -A -U ${HOST_ADDR} -c1 >/dev/null 2>&1
+
+ if verify_arp $1 $2; then
+ printf " TEST: %-60s [ OK ]\n" "${test_msg[*]}"
+ else
+ printf " TEST: %-60s [FAIL]\n" "${test_msg[*]}"
+ fi
+ cleanup
+ set +e
+}
+
+arp_test_gratuitous_combinations() {
+ arp_test_gratuitous 0
+ arp_test_gratuitous 1
+	arp_test_gratuitous 2 0 # second argument: 1 = same subnet, 0 = not
+ arp_test_gratuitous 2 1
+}
+
+cleanup_tcpdump() {
+ set -e
+ [[ ! -z ${tcpdump_stdout} ]] && rm -f ${tcpdump_stdout}
+ [[ ! -z ${tcpdump_stderr} ]] && rm -f ${tcpdump_stderr}
+ tcpdump_stdout=
+ tcpdump_stderr=
+ set +e
+}
+
+start_tcpdump() {
+ set -e
+ tcpdump_stdout=`mktemp`
+ tcpdump_stderr=`mktemp`
+ ip netns exec ${ROUTER_NS_V6} timeout 15s \
+ tcpdump --immediate-mode -tpni ${ROUTER_INTF} -c 1 \
+ "icmp6 && icmp6[0] == 136 && src ${HOST_ADDR_V6}" \
+		> ${tcpdump_stdout} 2> ${tcpdump_stderr}
+ set +e
+}
+
+verify_ndisc() {
+ local accept_untracked_na=$1
+ local same_subnet=$2
+
+ neigh_show_output=$(ip -6 -netns ${ROUTER_NS_V6} neigh show \
+ to ${HOST_ADDR_V6} dev ${ROUTER_INTF} nud stale)
+
+ if [ ${accept_untracked_na} -eq 1 ]; then
+ # Neighbour entry expected to be present
+ [[ ${neigh_show_output} ]]
+ elif [ ${accept_untracked_na} -eq 2 ]; then
+ if [ ${same_subnet} -eq 1 ]; then
+ [[ ${neigh_show_output} ]]
+ else
+ [[ -z "${neigh_show_output}" ]]
+ fi
+ else
+ # Neighbour entry expected to be absent for all other cases
+ [[ -z "${neigh_show_output}" ]]
+ fi
+}
+
+ndisc_test_untracked_advertisements() {
+ set -e
+ test_msg=("test_ndisc: "
+ "accept_untracked_na=$1")
+
+ local accept_untracked_na=$1
+ local same_subnet=$2
+ if [ ${accept_untracked_na} -eq 2 ]; then
+ test_msg=("test_ndisc: "
+ "accept_untracked_na=$1 "
+ "same_subnet=$2")
+ if [ ${same_subnet} -eq 0 ]; then
+ # Not same subnet
+ HOST_ADDR_V6=2000:db8:abcd:0013::4
+ else
+ HOST_ADDR_V6=2001:db8:abcd:0012::3
+ fi
+ fi
+ setup_v6 $1 $2
+ start_tcpdump
+
+ if verify_ndisc $1 $2; then
+ printf " TEST: %-60s [ OK ]\n" "${test_msg[*]}"
+ else
+ printf " TEST: %-60s [FAIL]\n" "${test_msg[*]}"
+ fi
+
+ cleanup_tcpdump
+ cleanup_v6
+ set +e
+}
+
+ndisc_test_untracked_combinations() {
+ ndisc_test_untracked_advertisements 0
+ ndisc_test_untracked_advertisements 1
+ ndisc_test_untracked_advertisements 2 0
+ ndisc_test_untracked_advertisements 2 1
+}
+
+################################################################################
+# usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+EOF
+}
+
+################################################################################
+# main
+
+while getopts ":t:h" opt; do
+ case $opt in
+ t) TESTS=$OPTARG;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip;
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v tcpdump)" ]; then
+ echo "SKIP: Could not run test without tcpdump tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v arping)" ]; then
+ echo "SKIP: Could not run test without arping tool"
+ exit $ksft_skip
+fi
+
+# start clean
+cleanup &> /dev/null
+cleanup_v6 &> /dev/null
+
+for t in $TESTS
+do
+ case $t in
+ arp_test_gratuitous_combinations|arp) arp_test_gratuitous_combinations;;
+ ndisc_test_untracked_combinations|ndisc) \
+ ndisc_test_untracked_combinations;;
+ help) echo "Test names: $TESTS"; exit 0;;
+	esac
+done
diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
index f91bf14bbee7..8ccaf8732eb2 100644
--- a/tools/testing/selftests/net/bpf/Makefile
+++ b/tools/testing/selftests/net/bpf/Makefile
@@ -2,6 +2,7 @@
CLANG ?= clang
CCINCLUDE += -I../../bpf
+CCINCLUDE += -I../../../../lib
CCINCLUDE += -I../../../../../usr/include/
TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
@@ -10,5 +11,4 @@ all: $(TEST_CUSTOM_PROGS)
$(OUTPUT)/%.o: %.c
$(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
-clean:
- rm -f $(TEST_CUSTOM_PROGS)
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
index bc2162909a1a..75dd83e39207 100644
--- a/tools/testing/selftests/net/cmsg_sender.c
+++ b/tools/testing/selftests/net/cmsg_sender.c
@@ -456,7 +456,7 @@ int main(int argc, char *argv[])
buf[1] = 0;
} else if (opt.sock.type == SOCK_RAW) {
struct udphdr hdr = { 1, 2, htons(opt.size), 0 };
- struct sockaddr_in6 *sin6 = (void *)ai->ai_addr;;
+ struct sockaddr_in6 *sin6 = (void *)ai->ai_addr;
memcpy(buf, &hdr, sizeof(hdr));
sin6->sin6_port = htons(opt.sock.proto);
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 54701c8b0cd7..03b586760164 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -70,6 +70,10 @@ NSB_LO_IP6=2001:db8:2::2
NL_IP=172.17.1.1
NL_IP6=2001:db8:4::1
+# multicast and broadcast addresses
+MCAST_IP=224.0.0.1
+BCAST_IP=255.255.255.255
+
MD5_PW=abc123
MD5_WRONG_PW=abc1234
@@ -308,6 +312,9 @@ addr2str()
127.0.0.1) echo "loopback";;
::1) echo "IPv6 loopback";;
+ ${BCAST_IP}) echo "broadcast";;
+ ${MCAST_IP}) echo "multicast";;
+
${NSA_IP}) echo "ns-A IP";;
${NSA_IP6}) echo "ns-A IPv6";;
${NSA_LO_IP}) echo "ns-A loopback IP";;
@@ -1793,12 +1800,33 @@ ipv4_addr_bind_novrf()
done
#
- # raw socket with nonlocal bind
+ # tests for nonlocal bind
#
a=${NL_IP}
log_start
- run_cmd nettest -s -R -P icmp -f -l ${a} -I ${NSA_DEV} -b
- log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after device bind"
+ run_cmd nettest -s -R -f -l ${a} -b
+ log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address"
+
+ log_start
+ run_cmd nettest -s -f -l ${a} -b
+ log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address"
+
+ log_start
+ run_cmd nettest -s -D -P icmp -f -l ${a} -b
+ log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address"
+
+ #
+ # check that ICMP sockets cannot bind to broadcast and multicast addresses
+ #
+ a=${BCAST_IP}
+ log_start
+ run_cmd nettest -s -D -P icmp -l ${a} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address"
+
+ a=${MCAST_IP}
+ log_start
+ run_cmd nettest -s -D -P icmp -l ${a} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to multicast address"
#
# tcp sockets
@@ -1850,13 +1878,34 @@ ipv4_addr_bind_vrf()
log_test_addr ${a} $? 1 "Raw socket bind to out of scope address after VRF bind"
#
- # raw socket with nonlocal bind
+ # tests for nonlocal bind
#
a=${NL_IP}
log_start
- run_cmd nettest -s -R -P icmp -f -l ${a} -I ${VRF} -b
+ run_cmd nettest -s -R -f -l ${a} -I ${VRF} -b
log_test_addr ${a} $? 0 "Raw socket bind to nonlocal address after VRF bind"
+ log_start
+ run_cmd nettest -s -f -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 0 "TCP socket bind to nonlocal address after VRF bind"
+
+ log_start
+ run_cmd nettest -s -D -P icmp -f -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 0 "ICMP socket bind to nonlocal address after VRF bind"
+
+ #
+ # check that ICMP sockets cannot bind to broadcast and multicast addresses
+ #
+ a=${BCAST_IP}
+ log_start
+ run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to broadcast address after VRF bind"
+
+ a=${MCAST_IP}
+ log_start
+ run_cmd nettest -s -D -P icmp -l ${a} -I ${VRF} -b
+ log_test_addr ${a} $? 1 "ICMP socket bind to multicast address after VRF bind"
+
#
# tcp sockets
#
@@ -1889,10 +1938,12 @@ ipv4_addr_bind()
log_subsection "No VRF"
setup
+ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
ipv4_addr_bind_novrf
log_subsection "With VRF"
setup "yes"
+ set_sysctl net.ipv4.ping_group_range='0 2147483647' 2>/dev/null
ipv4_addr_bind_vrf
}
diff --git a/tools/testing/selftests/net/fib_nexthop_nongw.sh b/tools/testing/selftests/net/fib_nexthop_nongw.sh
new file mode 100755
index 000000000000..b7b928b38ce4
--- /dev/null
+++ b/tools/testing/selftests/net/fib_nexthop_nongw.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# ns: h1 | ns: h2
+# 192.168.0.1/24 |
+# eth0 |
+# | 192.168.1.1/32
+# veth0 <---|---> veth1
+# Validate source address selection for route without gateway
+
+PAUSE_ON_FAIL=no
+VERBOSE=0
+ret=0
+
+################################################################################
+# helpers
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ printf "TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+
+ [ "$VERBOSE" = "1" ] && echo
+}
+
+run_cmd()
+{
+ local cmd="$*"
+ local out
+ local rc
+
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: $cmd"
+ fi
+
+ out=$(eval $cmd 2>&1)
+ rc=$?
+ if [ "$VERBOSE" = "1" -a -n "$out" ]; then
+ echo "$out"
+ fi
+
+ [ "$VERBOSE" = "1" ] && echo
+
+ return $rc
+}
+
+################################################################################
+# config
+setup()
+{
+ ip netns add h1
+ ip -n h1 link set lo up
+ ip netns add h2
+ ip -n h2 link set lo up
+
+	# Add a dummy eth0 to hold an IP address
+ ip -n h1 link add name eth0 type dummy
+ ip -n h1 link set eth0 up
+ ip -n h1 address add 192.168.0.1/24 dev eth0
+
+	# Create the veth pair between h1 and h2
+ ip -n h1 link add name veth0 type veth peer name veth1 netns h2
+ ip -n h1 link set veth0 up
+
+ ip -n h2 link set veth1 up
+
+ # Configure @IP in the peer netns
+ ip -n h2 address add 192.168.1.1/32 dev veth1
+ ip -n h2 route add default dev veth1
+
+ # Add a nexthop without @gw and use it in a route
+ ip -n h1 nexthop add id 1 dev veth0
+ ip -n h1 route add 192.168.1.1 nhid 1
+}
+
+cleanup()
+{
+ ip netns del h1 2>/dev/null
+ ip netns del h2 2>/dev/null
+}
+
+trap cleanup EXIT
+
+################################################################################
+# main
+
+while getopts :pv o
+do
+ case $o in
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=1;;
+ esac
+done
+
+cleanup
+setup
+
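+# With no gateway on nexthop 1, the kernel must still select a source
+# address (here from eth0) for the 192.168.1.1 route; that selection is
+# what the two checks below validate.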
+run_cmd ip -netns h1 route get 192.168.1.1
+log_test $? 0 "nexthop: get route with nexthop without gw"
+run_cmd ip netns exec h1 ping -c1 192.168.1.1
+log_test $? 0 "nexthop: ping through nexthop without gw"
+
+exit $ret
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index b3bf5319bb0e..d5a0dd548989 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -56,6 +56,7 @@ TESTS="${ALL_TESTS}"
VERBOSE=0
PAUSE_ON_FAIL=no
PAUSE=no
+PING_TIMEOUT=5
nsid=100
@@ -882,13 +883,13 @@ ipv6_fcnal_runtime()
log_test $? 0 "Route delete"
run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping with nexthop"
run_cmd "$IP nexthop add id 82 via 2001:db8:92::2 dev veth3"
run_cmd "$IP nexthop add id 122 group 81/82"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping - multipath"
#
@@ -896,26 +897,26 @@ ipv6_fcnal_runtime()
#
run_cmd "$IP -6 nexthop add id 83 blackhole"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 83"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 2 "Ping - blackhole"
run_cmd "$IP nexthop replace id 83 via 2001:db8:91::2 dev veth1"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping - blackhole replaced with gateway"
run_cmd "$IP -6 nexthop replace id 83 blackhole"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 2 "Ping - gateway replaced by blackhole"
run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
if [ $? -eq 0 ]; then
run_cmd "$IP nexthop replace id 122 group 83"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 2 "Ping - group with blackhole"
run_cmd "$IP nexthop replace id 122 group 81/82"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Ping - group blackhole replaced with gateways"
else
log_test 2 0 "Ping - multipath failed"
@@ -1003,10 +1004,10 @@ ipv6_fcnal_runtime()
run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3"
run_cmd "$IP nexthop add id 93 group 91/92"
run_cmd "$IP -6 ro add default nhid 91"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Nexthop with default route and rpfilter"
run_cmd "$IP -6 ro replace default nhid 93"
- run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 2001:db8:101::1"
log_test $? 0 "Nexthop with multipath default route and rpfilter"
# TO-DO:
@@ -1460,13 +1461,13 @@ ipv4_fcnal_runtime()
#
run_cmd "$IP nexthop replace id 21 via 172.16.1.2 dev veth1"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 21"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Basic ping"
run_cmd "$IP nexthop replace id 22 via 172.16.2.2 dev veth3"
run_cmd "$IP nexthop add id 122 group 21/22"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - multipath"
run_cmd "$IP ro delete 172.16.101.1/32 nhid 122"
@@ -1477,7 +1478,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP nexthop add id 501 via 172.16.1.2 dev veth1"
run_cmd "$IP ro add default nhid 501"
run_cmd "$IP ro add default via 172.16.1.3 dev veth1 metric 20"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - multiple default routes, nh first"
# flip the order
@@ -1486,7 +1487,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP ro add default via 172.16.1.2 dev veth1 metric 20"
run_cmd "$IP nexthop replace id 501 via 172.16.1.3 dev veth1"
run_cmd "$IP ro add default nhid 501 metric 20"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - multiple default routes, nh second"
run_cmd "$IP nexthop delete nhid 501"
@@ -1497,26 +1498,26 @@ ipv4_fcnal_runtime()
#
run_cmd "$IP nexthop add id 23 blackhole"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 23"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 2 "Ping - blackhole"
run_cmd "$IP nexthop replace id 23 via 172.16.1.2 dev veth1"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - blackhole replaced with gateway"
run_cmd "$IP nexthop replace id 23 blackhole"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 2 "Ping - gateway replaced by blackhole"
run_cmd "$IP ro replace 172.16.101.1/32 nhid 122"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
if [ $? -eq 0 ]; then
run_cmd "$IP nexthop replace id 122 group 23"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 2 "Ping - group with blackhole"
run_cmd "$IP nexthop replace id 122 group 21/22"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "Ping - group blackhole replaced with gateways"
else
log_test 2 0 "Ping - multipath failed"
@@ -1543,7 +1544,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP nexthop add id 24 via ${lladdr} dev veth1"
set +e
run_cmd "$IP ro replace 172.16.101.1/32 nhid 24"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv6 nexthop with IPv4 route"
$IP neigh sh | grep -q "${lladdr} dev veth1"
@@ -1567,11 +1568,11 @@ ipv4_fcnal_runtime()
check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv6 nexthop with IPv4 route"
run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv4 route with IPv6 gateway"
$IP neigh sh | grep -q "${lladdr} dev veth1"
@@ -1588,7 +1589,7 @@ ipv4_fcnal_runtime()
run_cmd "$IP ro del 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
run_cmd "$IP -4 ro add default via inet6 ${lladdr} dev veth1"
- run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
+ run_cmd "ip netns exec me ping -c1 -w$PING_TIMEOUT 172.16.101.1"
log_test $? 0 "IPv4 default route with IPv6 gateway"
#
@@ -2253,6 +2254,7 @@ usage: ${0##*/} OPTS
-p Pause on fail
-P Pause after each test before cleanup
-v verbose mode (show commands and output)
+	-w <sec>       Timeout for each ping command, in seconds (default: 5)
Runtime test
-n num Number of nexthops to target
@@ -2265,7 +2267,7 @@ EOF
################################################################################
# main
-while getopts :t:pP46hv o
+while getopts :t:pP46hvw: o
do
case $o in
t) TESTS=$OPTARG;;
@@ -2274,6 +2276,7 @@ do
p) PAUSE_ON_FAIL=yes;;
P) PAUSE=yes;;
v) VERBOSE=$(($VERBOSE + 1));;
+ w) PING_TIMEOUT=$OPTARG;;
h) usage; exit 0;;
*) usage; exit 1;;
esac
diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
index 4f70baad867d..c245476fa29d 100755
--- a/tools/testing/selftests/net/fib_rule_tests.sh
+++ b/tools/testing/selftests/net/fib_rule_tests.sh
@@ -20,6 +20,7 @@ SRC_IP6=2001:db8:1::3
DEV_ADDR=192.51.100.1
DEV_ADDR6=2001:db8:1::1
DEV=dummy0
+TESTS="fib_rule6 fib_rule4"
log_test()
{
@@ -302,6 +303,29 @@ run_fibrule_tests()
log_section "IPv6 fib rule"
fib_rule6_test
}
+################################################################################
+# usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+EOF
+}
+
+################################################################################
+# main
+
+while getopts ":t:h" opt; do
+ case $opt in
+ t) TESTS=$OPTARG;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
if [ "$(id -u)" -ne 0 ];then
echo "SKIP: Need root privileges"
@@ -316,7 +340,16 @@ fi
# start clean
cleanup &> /dev/null
setup
-run_fibrule_tests
+for t in $TESTS
+do
+ case $t in
+ fib_rule6_test|fib_rule6) fib_rule6_test;;
+ fib_rule4_test|fib_rule4) fib_rule4_test;;
+
+ help) echo "Test names: $TESTS"; exit 0;;
+
+ esac
+done
cleanup
if [ "$TESTS" != "none" ]; then
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
index e811090f7748..a9c5c1be5088 100644
--- a/tools/testing/selftests/net/forwarding/Makefile
+++ b/tools/testing/selftests/net/forwarding/Makefile
@@ -2,6 +2,8 @@
TEST_PROGS = bridge_igmp.sh \
bridge_locked_port.sh \
+ bridge_mdb.sh \
+ bridge_mdb_port_down.sh \
bridge_mld.sh \
bridge_port_isolation.sh \
bridge_sticky_fdb.sh \
@@ -19,6 +21,7 @@ TEST_PROGS = bridge_igmp.sh \
gre_multipath_nh.sh \
gre_multipath.sh \
hw_stats_l3.sh \
+ hw_stats_l3_gre.sh \
ip6_forward_instats_vrf.sh \
ip6gre_custom_multipath_hash.sh \
ip6gre_flat_key.sh \
@@ -35,6 +38,7 @@ TEST_PROGS = bridge_igmp.sh \
ipip_hier_gre_key.sh \
ipip_hier_gre_keys.sh \
ipip_hier_gre.sh \
+ local_termination.sh \
loopback.sh \
mirror_gre_bound.sh \
mirror_gre_bridge_1d.sh \
@@ -50,6 +54,7 @@ TEST_PROGS = bridge_igmp.sh \
mirror_gre_vlan_bridge_1q.sh \
mirror_gre_vlan.sh \
mirror_vlan.sh \
+ no_forwarding.sh \
pedit_dsfield.sh \
pedit_ip.sh \
pedit_l4port.sh \
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
new file mode 100755
index 000000000000..b1ba6876dd86
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
@@ -0,0 +1,103 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Verify that adding host mdb entries works as intended for all types of
+# multicast filters: IPv4, IPv6, and MAC.
+
+ALL_TESTS="mdb_add_del_test"
+NUM_NETIFS=2
+
+TEST_GROUP_IP4="225.1.2.3"
+TEST_GROUP_IP6="ff02::42"
+TEST_GROUP_MAC="01:00:01:c0:ff:ee"
+
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+switch_create()
+{
+ # Enable multicast filtering
+ ip link add dev br0 type bridge mcast_snooping 1
+
+ ip link set dev $swp1 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+}
+
+switch_destroy()
+{
+ ip link set dev $swp1 down
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ vrf_prepare
+
+ h1_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
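+# do_mdb_add_del <group> [<flag>]: add a host mdb entry for <group> on br0,
+# check that it is listed with <flag> (default "temp"), then delete it and
+# check that it is gone, e.g. do_mdb_add_del 239.1.1.1 permanent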
+do_mdb_add_del()
+{
+ local group=$1
+ local flag=$2
+
+ RET=0
+ bridge mdb add dev br0 port br0 grp $group $flag 2>/dev/null
+ check_err $? "Failed adding $group to br0, port br0"
+
+ if [ -z "$flag" ]; then
+ flag="temp"
+ fi
+
+ bridge mdb show dev br0 | grep $group | grep -q $flag 2>/dev/null
+ check_err $? "$group not added with $flag flag"
+
+ bridge mdb del dev br0 port br0 grp $group 2>/dev/null
+ check_err $? "Failed deleting $group from br0, port br0"
+
+	bridge mdb show dev br0 | grep -q $group
+ check_err_fail 1 $? "$group still in mdb after delete"
+
+ log_test "MDB add/del group $group to bridge port br0"
+}
+
+mdb_add_del_test()
+{
+ do_mdb_add_del $TEST_GROUP_MAC permanent
+ do_mdb_add_del $TEST_GROUP_IP4
+ do_mdb_add_del $TEST_GROUP_IP6
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb_port_down.sh b/tools/testing/selftests/net/forwarding/bridge_mdb_port_down.sh
new file mode 100755
index 000000000000..1a0480e71d83
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb_port_down.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Verify that permanent mdb entries can be added to and deleted from bridge
+# ports that are down, and that they behave correctly once the port comes
+# back up.
+
+ALL_TESTS="add_del_to_port_down"
+NUM_NETIFS=4
+
+TEST_GROUP="239.10.10.10"
+TEST_GROUP_MAC="01:00:5e:0a:0a:0a"
+
+source lib.sh
+
+add_del_to_port_down()
+{
+ RET=0
+
+ ip link set dev $swp2 down
+ bridge mdb add dev br0 port "$swp2" grp $TEST_GROUP permanent 2>/dev/null
+ check_err $? "Failed adding mdb entry"
+
+ ip link set dev $swp2 up
+ setup_wait_dev $swp2
+ mcast_packet_test $TEST_GROUP_MAC 192.0.2.1 $TEST_GROUP $h1 $h2
+ check_fail $? "Traffic to $TEST_GROUP wasn't forwarded"
+
+ ip link set dev $swp2 down
+ bridge mdb show dev br0 | grep -q "$TEST_GROUP permanent" 2>/dev/null
+ check_err $? "MDB entry did not persist after link up/down"
+
+ bridge mdb del dev br0 port "$swp2" grp $TEST_GROUP 2>/dev/null
+ check_err $? "Failed deleting mdb entry"
+
+ ip link set dev $swp2 up
+ setup_wait_dev $swp2
+ mcast_packet_test $TEST_GROUP_MAC 192.0.2.1 $TEST_GROUP $h1 $h2
+ check_err $? "Traffic to $TEST_GROUP was forwarded after entry removed"
+
+	log_test "MDB add/del entry to port with state down"
+}
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 192.0.2.2/24 2001:db8:1::2/64
+}
+
+switch_create()
+{
+ # Enable multicast filtering
+ ip link add dev br0 type bridge mcast_snooping 1 mcast_querier 1
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+
+ bridge link set dev $swp2 mcast_flood off
+	# The bridge has a "grace time" after creation before it forwards
+	# multicast according to the mdb. Since mcast_flood is disabled on
+	# $swp2, wait out this grace period before running the test.
+ sleep 10
+}
+
+switch_destroy()
+{
+ ip link set dev $swp1 down
+ ip link set dev $swp2 down
+ ip link del dev br0
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h1_destroy
+ h2_destroy
+
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+tests_run
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
index 4b42dfd4efd1..072faa77f53b 100755
--- a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
+++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
@@ -11,6 +11,8 @@ NUM_NETIFS=2
source lib.sh
source ethtool_lib.sh
+TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
+
setup_prepare()
{
swp1=${NETIFS[p1]}
@@ -18,7 +20,7 @@ setup_prepare()
swp3=$NETIF_NO_CABLE
}
-ethtool_extended_state_check()
+ethtool_ext_state()
{
local dev=$1; shift
local expected_ext_state=$1; shift
@@ -30,21 +32,27 @@ ethtool_extended_state_check()
| sed -e 's/^[[:space:]]*//')
ext_state=$(echo $ext_state | cut -d "," -f1)
- [[ $ext_state == $expected_ext_state ]]
- check_err $? "Expected \"$expected_ext_state\", got \"$ext_state\""
-
- [[ $ext_substate == $expected_ext_substate ]]
- check_err $? "Expected \"$expected_ext_substate\", got \"$ext_substate\""
+ if [[ $ext_state != $expected_ext_state ]]; then
+ echo "Expected \"$expected_ext_state\", got \"$ext_state\""
+ return 1
+ fi
+ if [[ $ext_substate != $expected_ext_substate ]]; then
+ echo "Expected \"$expected_ext_substate\", got \"$ext_substate\""
+ return 1
+ fi
}
autoneg()
{
+ local msg
+
RET=0
ip link set dev $swp1 up
- sleep 4
- ethtool_extended_state_check $swp1 "Autoneg" "No partner detected"
+ msg=$(busywait $TIMEOUT ethtool_ext_state $swp1 \
+ "Autoneg" "No partner detected")
+ check_err $? "$msg"
log_test "Autoneg, No partner detected"
@@ -53,6 +61,8 @@ autoneg()
autoneg_force_mode()
{
+ local msg
+
RET=0
ip link set dev $swp1 up
@@ -65,12 +75,13 @@ autoneg_force_mode()
ethtool_set $swp1 speed $speed1 autoneg off
ethtool_set $swp2 speed $speed2 autoneg off
- sleep 4
- ethtool_extended_state_check $swp1 "Autoneg" \
- "No partner detected during force mode"
+ msg=$(busywait $TIMEOUT ethtool_ext_state $swp1 \
+ "Autoneg" "No partner detected during force mode")
+ check_err $? "$msg"
- ethtool_extended_state_check $swp2 "Autoneg" \
- "No partner detected during force mode"
+ msg=$(busywait $TIMEOUT ethtool_ext_state $swp2 \
+ "Autoneg" "No partner detected during force mode")
+ check_err $? "$msg"
log_test "Autoneg, No partner detected during force mode"
@@ -83,12 +94,14 @@ autoneg_force_mode()
no_cable()
{
+ local msg
+
RET=0
ip link set dev $swp3 up
- sleep 1
- ethtool_extended_state_check $swp3 "No cable"
+ msg=$(busywait $TIMEOUT ethtool_ext_state $swp3 "No cable")
+ check_err $? "$msg"
log_test "No cable"
diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
index 1c11c4256d06..9c1f76e108af 100755
--- a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
+++ b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh
@@ -162,14 +162,6 @@ ping_ipv6()
ping_test $h1.200 2001:db8:2::1 " IPv6"
}
-get_l3_stat()
-{
- local selector=$1; shift
-
- ip -j stats show dev $rp1.200 group offload subgroup l3_stats |
- jq '.[0].stats64.'$selector
-}
-
send_packets_rx_ipv4()
{
# Send 21 packets instead of 20, because the first one might trap and go
@@ -208,11 +200,11 @@ ___test_stats()
local a
local b
- a=$(get_l3_stat ${dir}.packets)
+ a=$(hw_stats_get l3_stats $rp1.200 ${dir} packets)
send_packets_${dir}_${prot}
"$@"
b=$(busywait "$TC_HIT_TIMEOUT" until_counter_is ">= $a + 20" \
- get_l3_stat ${dir}.packets)
+ hw_stats_get l3_stats $rp1.200 ${dir} packets)
check_err $? "Traffic not reflected in the counter: $a -> $b"
}
@@ -281,11 +273,11 @@ __test_stats_report()
RET=0
- a=$(get_l3_stat ${dir}.packets)
+ a=$(hw_stats_get l3_stats $rp1.200 ${dir} packets)
send_packets_${dir}_${prot}
ip address flush dev $rp1.200
b=$(busywait "$TC_HIT_TIMEOUT" until_counter_is ">= $a + 20" \
- get_l3_stat ${dir}.packets)
+ hw_stats_get l3_stats $rp1.200 ${dir} packets)
check_err $? "Traffic not reflected in the counter: $a -> $b"
log_test "Test ${dir} packets: stats pushed on loss of L3"
diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
new file mode 100755
index 000000000000..eb9ec4a68f84
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Test L3 stats on a flat GRE tunnel carrying IPv4, without a tunnel key.
+
+# This test uses flat topology for IP tunneling tests. See ipip_lib.sh for more
+# details.
+
+ALL_TESTS="
+ ping_ipv4
+ test_stats_rx
+ test_stats_tx
+"
+NUM_NETIFS=6
+source lib.sh
+source ipip_lib.sh
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ ol1=${NETIFS[p2]}
+
+ ul1=${NETIFS[p3]}
+ ul2=${NETIFS[p4]}
+
+ ol2=${NETIFS[p5]}
+ h2=${NETIFS[p6]}
+
+ ol1mac=$(mac_get $ol1)
+
+ forwarding_enable
+ vrf_prepare
+ h1_create
+ h2_create
+ sw1_flat_create gre $ol1 $ul1
+ sw2_flat_create gre $ol2 $ul2
+ ip stats set dev g1a l3_stats on
+ ip stats set dev g2a l3_stats on
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip stats set dev g1a l3_stats off
+ ip stats set dev g2a l3_stats off
+
+ sw2_flat_destroy $ol2 $ul2
+ sw1_flat_destroy $ol1 $ul1
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+ forwarding_restore
+}
+
+ping_ipv4()
+{
+ RET=0
+
+ ping_test $h1 192.0.2.18 " gre flat"
+}
+
+send_packets_ipv4()
+{
+ # Send 21 packets instead of 20, because the first one might trap and go
+ # through the SW datapath, which might not bump the HW counter.
+ $MZ $h1 -c 21 -d 20msec -p 100 \
+ -a own -b $ol1mac -A 192.0.2.1 -B 192.0.2.18 \
+ -q -t udp sp=54321,dp=12345
+}
+
+test_stats()
+{
+ local dev=$1; shift
+ local dir=$1; shift
+
+ local a
+ local b
+
+ RET=0
+
+ a=$(hw_stats_get l3_stats $dev $dir packets)
+ send_packets_ipv4
+ b=$(busywait "$TC_HIT_TIMEOUT" until_counter_is ">= $a + 20" \
+ hw_stats_get l3_stats $dev $dir packets)
+ check_err $? "Traffic not reflected in the counter: $a -> $b"
+
+	log_test "Test $dir packets"
+}
+
+test_stats_tx()
+{
+ test_stats g1a tx
+}
+
+test_stats_rx()
+{
+ test_stats g2a rx
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 664b9ecaf228..3ffb9d6c0950 100644..100755
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -27,6 +27,9 @@ INTERFACE_TIMEOUT=${INTERFACE_TIMEOUT:=600}
LOW_AGEING_TIME=${LOW_AGEING_TIME:=1000}
REQUIRE_JQ=${REQUIRE_JQ:=yes}
REQUIRE_MZ=${REQUIRE_MZ:=yes}
+REQUIRE_MTOOLS=${REQUIRE_MTOOLS:=no}
+STABLE_MAC_ADDRS=${STABLE_MAC_ADDRS:=no}
+TCPDUMP_EXTRA_FLAGS=${TCPDUMP_EXTRA_FLAGS:=}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -159,6 +162,12 @@ fi
if [[ "$REQUIRE_MZ" = "yes" ]]; then
require_command $MZ
fi
+if [[ "$REQUIRE_MTOOLS" = "yes" ]]; then
+ # https://github.com/vladimiroltean/mtools/
+ # patched for IPv6 support
+ require_command msend
+ require_command mreceive
+fi
if [[ ! -v NUM_NETIFS ]]; then
echo "SKIP: importer does not define \"NUM_NETIFS\""
@@ -214,10 +223,41 @@ create_netif()
esac
}
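+
+# With STABLE_MAC_ADDRS=yes, give every netif a deterministic
+# 00:01:02:03:04:XX address for the duration of the run; the original
+# addresses are saved in MAC_ADDR_ORIG and put back by mac_addr_restore().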
+declare -A MAC_ADDR_ORIG
+mac_addr_prepare()
+{
+ local new_addr=
+ local dev=
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ dev=${NETIFS[p$i]}
+ new_addr=$(printf "00:01:02:03:04:%02x" $i)
+
+ MAC_ADDR_ORIG["$dev"]=$(ip -j link show dev $dev | jq -e '.[].address')
+ # Strip quotes
+ MAC_ADDR_ORIG["$dev"]=${MAC_ADDR_ORIG["$dev"]//\"/}
+ ip link set dev $dev address $new_addr
+ done
+}
+
+mac_addr_restore()
+{
+ local dev=
+
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
+ dev=${NETIFS[p$i]}
+ ip link set dev $dev address ${MAC_ADDR_ORIG["$dev"]}
+ done
+}
+
if [[ "$NETIF_CREATE" = "yes" ]]; then
create_netif
fi
+if [[ "$STABLE_MAC_ADDRS" = "yes" ]]; then
+ mac_addr_prepare
+fi
+
for ((i = 1; i <= NUM_NETIFS; ++i)); do
ip link show dev ${NETIFS[p$i]} &> /dev/null
if [[ $? -ne 0 ]]; then
@@ -503,6 +543,10 @@ pre_cleanup()
echo "Pausing before cleanup, hit any key to continue"
read
fi
+
+ if [[ "$STABLE_MAC_ADDRS" = "yes" ]]; then
+ mac_addr_restore
+ fi
}
vrf_prepare()
@@ -784,6 +828,17 @@ ipv6_stats_get()
cat /proc/net/dev_snmp6/$dev | grep "^$stat" | cut -f2
}
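+
+# hw_stats_get <suite> <dev> <dir> <stat>: read one stats64 counter from
+# "ip stats show ... group offload", e.g.:
+#   hw_stats_get l3_stats $rp1.200 rx packets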
+hw_stats_get()
+{
+ local suite=$1; shift
+ local if_name=$1; shift
+ local dir=$1; shift
+ local stat=$1; shift
+
+ ip -j stats show dev $if_name group offload subgroup $suite |
+ jq ".[0].stats64.$dir.$stat"
+}
+
humanize()
{
local speed=$1; shift
@@ -824,6 +879,15 @@ mac_get()
ip -j link show dev $if_name | jq -r '.[]["address"]'
}
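+
+# Print the first link-local address configured on $if_name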
+ipv6_lladdr_get()
+{
+ local if_name=$1
+
+ ip -j addr show dev $if_name | \
+ jq -r '.[]["addr_info"][] | select(.scope == "link").local' | \
+ head -1
+}
+
bridge_ageing_time_get()
{
local bridge=$1
@@ -1176,6 +1240,7 @@ learning_test()
# FDB entry was installed.
bridge link set dev $br_port1 flood off
+ ip link set $host1_if promisc on
tc qdisc add dev $host1_if ingress
tc filter add dev $host1_if ingress protocol ip pref 1 handle 101 \
flower dst_mac $mac action drop
@@ -1186,7 +1251,7 @@ learning_test()
tc -j -s filter show dev $host1_if ingress \
| jq -e ".[] | select(.options.handle == 101) \
| select(.options.actions[0].stats.packets == 1)" &> /dev/null
- check_fail $? "Packet reached second host when should not"
+	check_fail $? "Packet reached first host when it should not have"
$MZ $host1_if -c 1 -p 64 -a $mac -t ip -q
sleep 1
@@ -1225,6 +1290,7 @@ learning_test()
tc filter del dev $host1_if ingress protocol ip pref 1 handle 101 flower
tc qdisc del dev $host1_if ingress
+ ip link set $host1_if promisc off
bridge link set dev $br_port1 flood on
@@ -1242,6 +1308,7 @@ flood_test_do()
# Add an ACL on `host2_if` which will tell us whether the packet
# was flooded to it or not.
+ ip link set $host2_if promisc on
tc qdisc add dev $host2_if ingress
tc filter add dev $host2_if ingress protocol ip pref 1 handle 101 \
flower dst_mac $mac action drop
@@ -1259,6 +1326,7 @@ flood_test_do()
tc filter del dev $host2_if ingress protocol ip pref 1 handle 101 flower
tc qdisc del dev $host2_if ingress
+ ip link set $host2_if promisc off
return $err
}
@@ -1322,25 +1390,40 @@ flood_test()
__start_traffic()
{
+ local pktsize=$1; shift
local proto=$1; shift
local h_in=$1; shift # Where the traffic egresses the host
local sip=$1; shift
local dip=$1; shift
local dmac=$1; shift
- $MZ $h_in -p 8000 -A $sip -B $dip -c 0 \
+ $MZ $h_in -p $pktsize -A $sip -B $dip -c 0 \
-a own -b $dmac -t "$proto" -q "$@" &
sleep 1
}
+start_traffic_pktsize()
+{
+ local pktsize=$1; shift
+
+ __start_traffic $pktsize udp "$@"
+}
+
+start_tcp_traffic_pktsize()
+{
+ local pktsize=$1; shift
+
+ __start_traffic $pktsize tcp "$@"
+}
+
start_traffic()
{
- __start_traffic udp "$@"
+ start_traffic_pktsize 8000 "$@"
}
start_tcp_traffic()
{
- __start_traffic tcp "$@"
+ start_tcp_traffic_pktsize 8000 "$@"
}
stop_traffic()
@@ -1349,13 +1432,17 @@ stop_traffic()
{ kill %% && wait %%; } 2>/dev/null
}
+declare -A cappid
+declare -A capfile
+declare -A capout
+
tcpdump_start()
{
local if_name=$1; shift
local ns=$1; shift
- capfile=$(mktemp)
- capout=$(mktemp)
+ capfile[$if_name]=$(mktemp)
+ capout[$if_name]=$(mktemp)
if [ -z $ns ]; then
ns_cmd=""
@@ -1369,27 +1456,35 @@ tcpdump_start()
capuser="-Z $SUDO_USER"
fi
- $ns_cmd tcpdump -e -n -Q in -i $if_name \
- -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
- cappid=$!
+ $ns_cmd tcpdump $TCPDUMP_EXTRA_FLAGS -e -n -Q in -i $if_name \
+ -s 65535 -B 32768 $capuser -w ${capfile[$if_name]} \
+ > "${capout[$if_name]}" 2>&1 &
+ cappid[$if_name]=$!
sleep 1
}
tcpdump_stop()
{
- $ns_cmd kill $cappid
+ local if_name=$1
+ local pid=${cappid[$if_name]}
+
+ $ns_cmd kill "$pid" && wait "$pid"
sleep 1
}
tcpdump_cleanup()
{
- rm $capfile $capout
+ local if_name=$1
+
+ rm ${capfile[$if_name]} ${capout[$if_name]}
}
tcpdump_show()
{
- tcpdump -e -n -r $capfile 2>&1
+ local if_name=$1
+
+ tcpdump -e -n -r ${capfile[$if_name]} 2>&1
}
# return 0 if the packet wasn't seen on host2_if or 1 if it was
@@ -1499,6 +1594,37 @@ brmcast_check_sg_state()
done
}
+mc_join()
+{
+ local if_name=$1
+ local group=$2
+ local vrf_name=$(master_name_get $if_name)
+
+ # We don't care about actual reception, just about joining the
+ # IP multicast group and adding the L2 address to the device's
+ # MAC filtering table
+ ip vrf exec $vrf_name \
+ mreceive -g $group -I $if_name > /dev/null 2>&1 &
+ mreceive_pid=$!
+
+ sleep 1
+}
+
+mc_leave()
+{
+ kill "$mreceive_pid" && wait "$mreceive_pid"
+}
+
+mc_send()
+{
+ local if_name=$1
+ local groups=$2
+ local vrf_name=$(master_name_get $if_name)
+
+ ip vrf exec $vrf_name \
+ msend -g $groups -I $if_name -c 1 > /dev/null 2>&1
+}
+
start_ip_monitor()
{
local mtype=$1; shift
diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
new file mode 100755
index 000000000000..c5b0cbc85b3e
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/local_termination.sh
@@ -0,0 +1,299 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="standalone bridge"
+NUM_NETIFS=2
+PING_COUNT=1
+REQUIRE_MTOOLS=yes
+REQUIRE_MZ=no
+
+source lib.sh
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+BRIDGE_ADDR="00:00:de:ad:be:ee"
+MACVLAN_ADDR="00:00:de:ad:be:ef"
+UNKNOWN_UC_ADDR1="de:ad:be:ef:ee:03"
+UNKNOWN_UC_ADDR2="de:ad:be:ef:ee:04"
+UNKNOWN_UC_ADDR3="de:ad:be:ef:ee:05"
+JOINED_IPV4_MC_ADDR="225.1.2.3"
+UNKNOWN_IPV4_MC_ADDR1="225.1.2.4"
+UNKNOWN_IPV4_MC_ADDR2="225.1.2.5"
+UNKNOWN_IPV4_MC_ADDR3="225.1.2.6"
+JOINED_IPV6_MC_ADDR="ff2e::0102:0304"
+UNKNOWN_IPV6_MC_ADDR1="ff2e::0102:0305"
+UNKNOWN_IPV6_MC_ADDR2="ff2e::0102:0306"
+UNKNOWN_IPV6_MC_ADDR3="ff2e::0102:0307"
+
+JOINED_MACV4_MC_ADDR="01:00:5e:01:02:03"
+UNKNOWN_MACV4_MC_ADDR1="01:00:5e:01:02:04"
+UNKNOWN_MACV4_MC_ADDR2="01:00:5e:01:02:05"
+UNKNOWN_MACV4_MC_ADDR3="01:00:5e:01:02:06"
+JOINED_MACV6_MC_ADDR="33:33:01:02:03:04"
+UNKNOWN_MACV6_MC_ADDR1="33:33:01:02:03:05"
+UNKNOWN_MACV6_MC_ADDR2="33:33:01:02:03:06"
+UNKNOWN_MACV6_MC_ADDR3="33:33:01:02:03:07"
+
+NON_IP_MC="01:02:03:04:05:06"
+NON_IP_PKT="00:04 48:45:4c:4f"
+BC="ff:ff:ff:ff:ff:ff"
+
+# Disable promisc to ensure we don't receive unknown MAC DA packets
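+# (tcpdump -p avoids putting the interface into promiscuous mode, -l
+# line-buffers the output)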
+export TCPDUMP_EXTRA_FLAGS="-pl"
+
+h1=${NETIFS[p1]}
+h2=${NETIFS[p2]}
+
+send_non_ip()
+{
+ local if_name=$1
+ local smac=$2
+ local dmac=$3
+
+ $MZ -q $if_name "$dmac $smac $NON_IP_PKT"
+}
+
+send_uc_ipv4()
+{
+ local if_name=$1
+ local dmac=$2
+
+ ip neigh add $H2_IPV4 lladdr $dmac dev $if_name
+ ping_do $if_name $H2_IPV4
+ ip neigh del $H2_IPV4 dev $if_name
+}
+
+check_rcv()
+{
+ local if_name=$1
+ local type=$2
+ local pattern=$3
+ local should_receive=$4
+ local should_fail=
+
+ [ $should_receive = true ] && should_fail=0 || should_fail=1
+ RET=0
+
+ tcpdump_show $if_name | grep -q "$pattern"
+
+ check_err_fail "$should_fail" "$?" "reception"
+
+ log_test "$if_name: $type"
+}
+
+mc_route_prepare()
+{
+ local if_name=$1
+ local vrf_name=$(master_name_get $if_name)
+
+ ip route add 225.100.1.0/24 dev $if_name vrf $vrf_name
+ ip -6 route add ff2e::/64 dev $if_name vrf $vrf_name
+}
+
+mc_route_destroy()
+{
+ local if_name=$1
+ local vrf_name=$(master_name_get $if_name)
+
+ ip route del 225.100.1.0/24 dev $if_name vrf $vrf_name
+ ip -6 route del ff2e::/64 dev $if_name vrf $vrf_name
+}
+
+run_test()
+{
+ local rcv_if_name=$1
+ local smac=$(mac_get $h1)
+ local rcv_dmac=$(mac_get $rcv_if_name)
+
+ tcpdump_start $rcv_if_name
+
+ mc_route_prepare $h1
+ mc_route_prepare $rcv_if_name
+
+ send_uc_ipv4 $h1 $rcv_dmac
+ send_uc_ipv4 $h1 $MACVLAN_ADDR
+ send_uc_ipv4 $h1 $UNKNOWN_UC_ADDR1
+
+ ip link set dev $rcv_if_name promisc on
+ send_uc_ipv4 $h1 $UNKNOWN_UC_ADDR2
+ mc_send $h1 $UNKNOWN_IPV4_MC_ADDR2
+ mc_send $h1 $UNKNOWN_IPV6_MC_ADDR2
+ ip link set dev $rcv_if_name promisc off
+
+ mc_join $rcv_if_name $JOINED_IPV4_MC_ADDR
+ mc_send $h1 $JOINED_IPV4_MC_ADDR
+ mc_leave
+
+ mc_join $rcv_if_name $JOINED_IPV6_MC_ADDR
+ mc_send $h1 $JOINED_IPV6_MC_ADDR
+ mc_leave
+
+ mc_send $h1 $UNKNOWN_IPV4_MC_ADDR1
+ mc_send $h1 $UNKNOWN_IPV6_MC_ADDR1
+
+ ip link set dev $rcv_if_name allmulticast on
+ send_uc_ipv4 $h1 $UNKNOWN_UC_ADDR3
+ mc_send $h1 $UNKNOWN_IPV4_MC_ADDR3
+ mc_send $h1 $UNKNOWN_IPV6_MC_ADDR3
+ ip link set dev $rcv_if_name allmulticast off
+
+ mc_route_destroy $rcv_if_name
+ mc_route_destroy $h1
+
+ sleep 1
+
+ tcpdump_stop $rcv_if_name
+
+ check_rcv $rcv_if_name "Unicast IPv4 to primary MAC address" \
+ "$smac > $rcv_dmac, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Unicast IPv4 to macvlan MAC address" \
+ "$smac > $MACVLAN_ADDR, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Unicast IPv4 to unknown MAC address" \
+ "$smac > $UNKNOWN_UC_ADDR1, ethertype IPv4 (0x0800)" \
+ false
+
+ check_rcv $rcv_if_name "Unicast IPv4 to unknown MAC address, promisc" \
+ "$smac > $UNKNOWN_UC_ADDR2, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Unicast IPv4 to unknown MAC address, allmulti" \
+ "$smac > $UNKNOWN_UC_ADDR3, ethertype IPv4 (0x0800)" \
+ false
+
+ check_rcv $rcv_if_name "Multicast IPv4 to joined group" \
+ "$smac > $JOINED_MACV4_MC_ADDR, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv4 to unknown group" \
+ "$smac > $UNKNOWN_MACV4_MC_ADDR1, ethertype IPv4 (0x0800)" \
+ false
+
+ check_rcv $rcv_if_name "Multicast IPv4 to unknown group, promisc" \
+ "$smac > $UNKNOWN_MACV4_MC_ADDR2, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv4 to unknown group, allmulti" \
+ "$smac > $UNKNOWN_MACV4_MC_ADDR3, ethertype IPv4 (0x0800)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv6 to joined group" \
+ "$smac > $JOINED_MACV6_MC_ADDR, ethertype IPv6 (0x86dd)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv6 to unknown group" \
+ "$smac > $UNKNOWN_MACV6_MC_ADDR1, ethertype IPv6 (0x86dd)" \
+ false
+
+ check_rcv $rcv_if_name "Multicast IPv6 to unknown group, promisc" \
+ "$smac > $UNKNOWN_MACV6_MC_ADDR2, ethertype IPv6 (0x86dd)" \
+ true
+
+ check_rcv $rcv_if_name "Multicast IPv6 to unknown group, allmulti" \
+ "$smac > $UNKNOWN_MACV6_MC_ADDR3, ethertype IPv6 (0x86dd)" \
+ true
+
+ tcpdump_cleanup $rcv_if_name
+}
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+bridge_create()
+{
+ ip link add br0 type bridge
+ ip link set br0 address $BRIDGE_ADDR
+ ip link set br0 up
+
+ ip link set $h2 master br0
+ ip link set $h2 up
+
+ simple_if_init br0 $H2_IPV4/24 $H2_IPV6/64
+}
+
+bridge_destroy()
+{
+ simple_if_fini br0 $H2_IPV4/24 $H2_IPV6/64
+
+ ip link del br0
+}
+
+standalone()
+{
+ h1_create
+ h2_create
+
+ ip link add link $h2 name macvlan0 type macvlan mode private
+ ip link set macvlan0 address $MACVLAN_ADDR
+ ip link set macvlan0 up
+
+ run_test $h2
+
+ ip link del macvlan0
+
+ h2_destroy
+ h1_destroy
+}
+
+bridge()
+{
+ h1_create
+ bridge_create
+
+ ip link add link br0 name macvlan0 type macvlan mode private
+ ip link set macvlan0 address $MACVLAN_ADDR
+ ip link set macvlan0 up
+
+ run_test br0
+
+ ip link del macvlan0
+
+ bridge_destroy
+ h1_destroy
+}
+
+cleanup()
+{
+ pre_cleanup
+ vrf_cleanup
+}
+
+setup_prepare()
+{
+ vrf_prepare
+ # setup_wait() needs this
+ ip link set $h1 up
+ ip link set $h2 up
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
index 28d568c48a73..91e431cd919e 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh
@@ -141,12 +141,13 @@ switch_create()
ip link set dev $swp4 up
ip link add name br1 type bridge vlan_filtering 1
- ip link set dev br1 up
- __addr_add_del br1 add 192.0.2.129/32
- ip -4 route add 192.0.2.130/32 dev br1
team_create lag loadbalance $swp3 $swp4
ip link set dev lag master br1
+
+ ip link set dev br1 up
+ __addr_add_del br1 add 192.0.2.129/32
+ ip -4 route add 192.0.2.130/32 dev br1
}
switch_destroy()
diff --git a/tools/testing/selftests/net/forwarding/no_forwarding.sh b/tools/testing/selftests/net/forwarding/no_forwarding.sh
new file mode 100755
index 000000000000..af3b398d13f0
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/no_forwarding.sh
@@ -0,0 +1,261 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="standalone two_bridges one_bridge_two_pvids"
+NUM_NETIFS=4
+
+source lib.sh
+
+h1=${NETIFS[p1]}
+h2=${NETIFS[p3]}
+swp1=${NETIFS[p2]}
+swp2=${NETIFS[p4]}
+
+H1_IPV4="192.0.2.1"
+H2_IPV4="192.0.2.2"
+H1_IPV6="2001:db8:1::1"
+H2_IPV6="2001:db8:1::2"
+
+IPV4_ALLNODES="224.0.0.1"
+IPV6_ALLNODES="ff02::1"
+MACV4_ALLNODES="01:00:5e:00:00:01"
+MACV6_ALLNODES="33:33:00:00:00:01"
+NON_IP_MC="01:02:03:04:05:06"
+NON_IP_PKT="00:04 48:45:4c:4f"
+BC="ff:ff:ff:ff:ff:ff"
+
+# The full 4K VLAN space is too much to check, so strategically pick some
+# values which should provide reasonable coverage
+vids=(0 1 2 5 10 20 50 100 200 500 1000 2000 4000 4094)
+
+send_non_ip()
+{
+ local if_name=$1
+ local smac=$2
+ local dmac=$3
+
+ $MZ -q $if_name "$dmac $smac $NON_IP_PKT"
+}
+
+send_uc_ipv4()
+{
+ local if_name=$1
+ local dmac=$2
+
+ ip neigh add $H2_IPV4 lladdr $dmac dev $if_name
+ ping_do $if_name $H2_IPV4
+ ip neigh del $H2_IPV4 dev $if_name
+}
+
+send_mc_ipv4()
+{
+ local if_name=$1
+
+ ping_do $if_name $IPV4_ALLNODES "-I $if_name"
+}
+
+send_uc_ipv6()
+{
+ local if_name=$1
+ local dmac=$2
+
+ ip -6 neigh add $H2_IPV6 lladdr $dmac dev $if_name
+ ping6_do $if_name $H2_IPV6
+ ip -6 neigh del $H2_IPV6 dev $if_name
+}
+
+send_mc_ipv6()
+{
+ local if_name=$1
+
+ ping6_do $if_name $IPV6_ALLNODES%$if_name
+}
+
+check_rcv()
+{
+ local if_name=$1
+ local type=$2
+ local pattern=$3
+ local should_fail=1
+
+ RET=0
+
+ tcpdump_show $if_name | grep -q "$pattern"
+
+ check_err_fail "$should_fail" "$?" "reception"
+
+ log_test "$type"
+}
+
+run_test()
+{
+ local test_name="$1"
+ local smac=$(mac_get $h1)
+ local dmac=$(mac_get $h2)
+ local h1_ipv6_lladdr=$(ipv6_lladdr_get $h1)
+ local vid=
+
+ echo "$test_name: Sending packets"
+
+ tcpdump_start $h2
+
+ send_non_ip $h1 $smac $dmac
+ send_non_ip $h1 $smac $NON_IP_MC
+ send_non_ip $h1 $smac $BC
+ send_uc_ipv4 $h1 $dmac
+ send_mc_ipv4 $h1
+ send_uc_ipv6 $h1 $dmac
+ send_mc_ipv6 $h1
+
+ for vid in "${vids[@]}"; do
+ vlan_create $h1 $vid
+ simple_if_init $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+
+ send_non_ip $h1.$vid $smac $dmac
+ send_non_ip $h1.$vid $smac $NON_IP_MC
+ send_non_ip $h1.$vid $smac $BC
+ send_uc_ipv4 $h1.$vid $dmac
+ send_mc_ipv4 $h1.$vid
+ send_uc_ipv6 $h1.$vid $dmac
+ send_mc_ipv6 $h1.$vid
+
+ simple_if_fini $h1.$vid $H1_IPV4/24 $H1_IPV6/64
+ vlan_destroy $h1 $vid
+ done
+
+ sleep 1
+
+ echo "$test_name: Checking which packets were received"
+
+ tcpdump_stop $h2
+
+ check_rcv $h2 "$test_name: Unicast non-IP untagged" \
+ "$smac > $dmac, 802.3, length 4:"
+
+ check_rcv $h2 "$test_name: Multicast non-IP untagged" \
+ "$smac > $NON_IP_MC, 802.3, length 4:"
+
+ check_rcv $h2 "$test_name: Broadcast non-IP untagged" \
+ "$smac > $BC, 802.3, length 4:"
+
+ check_rcv $h2 "$test_name: Unicast IPv4 untagged" \
+ "$smac > $dmac, ethertype IPv4 (0x0800)"
+
+ check_rcv $h2 "$test_name: Multicast IPv4 untagged" \
+ "$smac > $MACV4_ALLNODES, ethertype IPv4 (0x0800).*: $H1_IPV4 > $IPV4_ALLNODES"
+
+ check_rcv $h2 "$test_name: Unicast IPv6 untagged" \
+ "$smac > $dmac, ethertype IPv6 (0x86dd).*8: $H1_IPV6 > $H2_IPV6"
+
+ check_rcv $h2 "$test_name: Multicast IPv6 untagged" \
+ "$smac > $MACV6_ALLNODES, ethertype IPv6 (0x86dd).*: $h1_ipv6_lladdr > $IPV6_ALLNODES"
+
+ for vid in "${vids[@]}"; do
+ check_rcv $h2 "$test_name: Unicast non-IP VID $vid" \
+ "$smac > $dmac, ethertype 802.1Q (0x8100).*vlan $vid,.*length 4"
+
+ check_rcv $h2 "$test_name: Multicast non-IP VID $vid" \
+ "$smac > $NON_IP_MC, ethertype 802.1Q (0x8100).*vlan $vid,.*length 4"
+
+ check_rcv $h2 "$test_name: Broadcast non-IP VID $vid" \
+ "$smac > $BC, ethertype 802.1Q (0x8100).*vlan $vid,.*length 4"
+
+ check_rcv $h2 "$test_name: Unicast IPv4 VID $vid" \
+ "$smac > $dmac, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv4 (0x0800), $H1_IPV4 > $H2_IPV4"
+
+ check_rcv $h2 "$test_name: Multicast IPv4 VID $vid" \
+ "$smac > $MACV4_ALLNODES, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv4 (0x0800), $H1_IPV4 > $IPV4_ALLNODES"
+
+ check_rcv $h2 "$test_name: Unicast IPv6 VID $vid" \
+ "$smac > $dmac, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv6 (0x86dd), $H1_IPV6 > $H2_IPV6"
+
+ check_rcv $h2 "$test_name: Multicast IPv6 VID $vid" \
+ "$smac > $MACV6_ALLNODES, ethertype 802.1Q (0x8100).*vlan $vid,.*ethertype IPv6 (0x86dd), $h1_ipv6_lladdr > $IPV6_ALLNODES"
+ done
+
+ tcpdump_cleanup $h2
+}
+
+standalone()
+{
+ run_test "Standalone switch ports"
+}
+
+two_bridges()
+{
+ ip link add br0 type bridge && ip link set br0 up
+ ip link add br1 type bridge && ip link set br1 up
+ ip link set $swp1 master br0
+ ip link set $swp2 master br1
+
+ run_test "Switch ports in different bridges"
+
+ ip link del br1
+ ip link del br0
+}
+
+one_bridge_two_pvids()
+{
+ ip link add br0 type bridge vlan_filtering 1 vlan_default_pvid 0
+ ip link set br0 up
+ ip link set $swp1 master br0
+ ip link set $swp2 master br0
+
+	bridge vlan add dev $swp1 vid 1 pvid untagged
+	bridge vlan add dev $swp2 vid 2 pvid untagged
+
+ run_test "Switch ports in VLAN-aware bridge with different PVIDs"
+
+ ip link del br0
+}
+
+h1_create()
+{
+ simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+h2_destroy()
+{
+ simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+setup_prepare()
+{
+ vrf_prepare
+
+ h1_create
+ h2_create
+	# The tests call simple_if_init themselves, but setup_wait expects
+	# the interfaces to already be up, so bring them up here
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router.sh b/tools/testing/selftests/net/forwarding/router.sh
index 057f91b05098..b98ea9449b8b 100755
--- a/tools/testing/selftests/net/forwarding/router.sh
+++ b/tools/testing/selftests/net/forwarding/router.sh
@@ -1,6 +1,24 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# +--------------------+ +----------------------+
+# | H1 | | H2 |
+# | | | |
+# | $h1 + | | + $h2 |
+# | 192.0.2.2/24 | | | | 198.51.100.2/24 |
+# | 2001:db8:1::2/64 | | | | 2001:db8:2::2/64 |
+# | | | | | |
+# +------------------|-+ +-|--------------------+
+# | |
+# +------------------|-------------------------|--------------------+
+# | SW | | |
+# | | | |
+# | $rp1 + + $rp2 |
+# | 192.0.2.1/24 198.51.100.1/24 |
+# | 2001:db8:1::1/64 2001:db8:2::1/64 |
+# | |
+# +-----------------------------------------------------------------+
+
ALL_TESTS="
ping_ipv4
ping_ipv6
diff --git a/tools/testing/selftests/net/forwarding/router_vid_1.sh b/tools/testing/selftests/net/forwarding/router_vid_1.sh
index a7306c7ac06d..865c9f7d8143 100755
--- a/tools/testing/selftests/net/forwarding/router_vid_1.sh
+++ b/tools/testing/selftests/net/forwarding/router_vid_1.sh
@@ -1,7 +1,32 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="ping_ipv4 ping_ipv6"
+# +--------------------+ +----------------------+
+# | H1 | | H2 |
+# | | | |
+# | $h1.1 + | | + $h2.1 |
+# | 192.0.2.2/24 | | | | 198.51.100.2/24 |
+# | 2001:db8:1::2/64 | | | | 2001:db8:2::2/64 |
+# | | | | | |
+# | $h1 + | | + $h2 |
+# | | | | | |
+# +------------------|-+ +-|--------------------+
+# | |
+# +------------------|-------------------------|--------------------+
+# | SW | | |
+# | | | |
+# | $rp1 + + $rp2 |
+# | | | |
+# | $rp1.1 + + $rp2.1 |
+# | 192.0.2.1/24 198.51.100.1/24 |
+# | 2001:db8:1::1/64 2001:db8:2::1/64 |
+# | |
+# +-----------------------------------------------------------------+
+
+ALL_TESTS="
+ ping_ipv4
+ ping_ipv6
+"
NUM_NETIFS=4
source lib.sh
diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
index de19eb6c38f0..1e0a62f638fe 100755
--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
+++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
@@ -60,7 +60,7 @@ mirred_egress_test()
RET=0
tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
- $tcflags dst_ip 192.0.2.2 action drop
+ dst_ip 192.0.2.2 action drop
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
diff --git a/tools/testing/selftests/net/forwarding/tsn_lib.sh b/tools/testing/selftests/net/forwarding/tsn_lib.sh
new file mode 100644
index 000000000000..60a1423e8116
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/tsn_lib.sh
@@ -0,0 +1,235 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2021-2022 NXP
+
+REQUIRE_ISOCHRON=${REQUIRE_ISOCHRON:=yes}
+REQUIRE_LINUXPTP=${REQUIRE_LINUXPTP:=yes}
+
+# Tunables
+UTC_TAI_OFFSET=37
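+# (TAI has been 37 s ahead of UTC since the leap second at the end of 2016)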
+ISOCHRON_CPU=1
+
+if [[ "$REQUIRE_ISOCHRON" = "yes" ]]; then
+ # https://github.com/vladimiroltean/tsn-scripts
+ # WARNING: isochron versions pre-1.0 are unstable,
+ # always use the latest version
+ require_command isochron
+fi
+if [[ "$REQUIRE_LINUXPTP" = "yes" ]]; then
+ require_command phc2sys
+ require_command ptp4l
+fi
+
+phc2sys_start()
+{
+ local if_name=$1
+ local uds_address=$2
+ local extra_args=""
+
+ if ! [ -z "${uds_address}" ]; then
+ extra_args="${extra_args} -z ${uds_address}"
+ fi
+
+ phc2sys_log="$(mktemp)"
+
+ chrt -f 10 phc2sys -m \
+ -c ${if_name} \
+ -s CLOCK_REALTIME \
+ -O ${UTC_TAI_OFFSET} \
+ --step_threshold 0.00002 \
+ --first_step_threshold 0.00002 \
+ ${extra_args} \
+ > "${phc2sys_log}" 2>&1 &
+ phc2sys_pid=$!
+
+ echo "phc2sys logs to ${phc2sys_log} and has pid ${phc2sys_pid}"
+
+ sleep 1
+}
+
+phc2sys_stop()
+{
+ { kill ${phc2sys_pid} && wait ${phc2sys_pid}; } 2> /dev/null
+ rm "${phc2sys_log}" 2> /dev/null
+}
+
+ptp4l_start()
+{
+ local if_name=$1
+ local slave_only=$2
+ local uds_address=$3
+ local log="ptp4l_log_${if_name}"
+ local pid="ptp4l_pid_${if_name}"
+ local extra_args=""
+
+ if [ "${slave_only}" = true ]; then
+ extra_args="${extra_args} -s"
+ fi
+
+ # declare dynamic variables ptp4l_log_${if_name} and ptp4l_pid_${if_name}
+ # as global, so that they can be referenced later
+ declare -g "${log}=$(mktemp)"
+
+ chrt -f 10 ptp4l -m -2 -P \
+ -i ${if_name} \
+ --step_threshold 0.00002 \
+ --first_step_threshold 0.00002 \
+ --tx_timestamp_timeout 100 \
+ --uds_address="${uds_address}" \
+ ${extra_args} \
+ > "${!log}" 2>&1 &
+ declare -g "${pid}=$!"
+
+ echo "ptp4l for interface ${if_name} logs to ${!log} and has pid ${!pid}"
+
+ sleep 1
+}
+
+ptp4l_stop()
+{
+ local if_name=$1
+ local log="ptp4l_log_${if_name}"
+ local pid="ptp4l_pid_${if_name}"
+
+ { kill ${!pid} && wait ${!pid}; } 2> /dev/null
+ rm "${!log}" 2> /dev/null
+}
+
+cpufreq_max()
+{
+ local cpu=$1
+ local freq="cpu${cpu}_freq"
+ local governor="cpu${cpu}_governor"
+
+ # Kernel may be compiled with CONFIG_CPU_FREQ disabled
+ if ! [ -d /sys/bus/cpu/devices/cpu${cpu}/cpufreq ]; then
+ return
+ fi
+
+ # declare dynamic variables cpu${cpu}_freq and cpu${cpu}_governor as
+ # global, so they can be referenced later
+ declare -g "${freq}=$(cat /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_min_freq)"
+ declare -g "${governor}=$(cat /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_governor)"
+
+ cat /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_max_freq > \
+ /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_min_freq
+ echo -n "performance" > \
+ /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_governor
+}
+
+cpufreq_restore()
+{
+ local cpu=$1
+ local freq="cpu${cpu}_freq"
+ local governor="cpu${cpu}_governor"
+
+ if ! [ -d /sys/bus/cpu/devices/cpu${cpu}/cpufreq ]; then
+ return
+ fi
+
+ echo "${!freq}" > /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_min_freq
+ echo -n "${!governor}" > \
+ /sys/bus/cpu/devices/cpu${cpu}/cpufreq/scaling_governor
+}
+
+isochron_recv_start()
+{
+ local if_name=$1
+ local uds=$2
+ local extra_args=$3
+
+ if ! [ -z "${uds}" ]; then
+ extra_args="--unix-domain-socket ${uds}"
+ fi
+
+ isochron rcv \
+ --interface ${if_name} \
+ --sched-priority 98 \
+ --sched-fifo \
+ --utc-tai-offset ${UTC_TAI_OFFSET} \
+ --quiet \
+		${extra_args} &
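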
+ isochron_pid=$!
+
+ sleep 1
+}
+
+isochron_recv_stop()
+{
+ { kill ${isochron_pid} && wait ${isochron_pid}; } 2> /dev/null
+}
+
+isochron_do()
+{
+ local sender_if_name=$1; shift
+ local receiver_if_name=$1; shift
+ local sender_uds=$1; shift
+ local receiver_uds=$1; shift
+ local base_time=$1; shift
+ local cycle_time=$1; shift
+ local shift_time=$1; shift
+ local num_pkts=$1; shift
+ local vid=$1; shift
+ local priority=$1; shift
+ local dst_ip=$1; shift
+ local isochron_dat=$1; shift
+ local extra_args=""
+ local receiver_extra_args=""
+ local vrf="$(master_name_get ${sender_if_name})"
+ local use_l2="true"
+
+ if ! [ -z "${dst_ip}" ]; then
+ use_l2="false"
+ fi
+
+ if ! [ -z "${vrf}" ]; then
+ dst_ip="${dst_ip}%${vrf}"
+ fi
+
+ if ! [ -z "${vid}" ]; then
+ vid="--vid=${vid}"
+ fi
+
+ if [ -z "${receiver_uds}" ]; then
+ extra_args="${extra_args} --omit-remote-sync"
+ fi
+
+ if ! [ -z "${shift_time}" ]; then
+ extra_args="${extra_args} --shift-time=${shift_time}"
+ fi
+
+ if [ "${use_l2}" = "true" ]; then
+ extra_args="${extra_args} --l2 --etype=0xdead ${vid}"
+ receiver_extra_args="--l2 --etype=0xdead"
+ else
+ extra_args="${extra_args} --l4 --ip-destination=${dst_ip}"
+ receiver_extra_args="--l4"
+ fi
+
+ cpufreq_max ${ISOCHRON_CPU}
+
+	isochron_recv_start "${receiver_if_name}" "${receiver_uds}" "${receiver_extra_args}"
+
+ isochron send \
+ --interface ${sender_if_name} \
+ --unix-domain-socket ${sender_uds} \
+ --priority ${priority} \
+ --base-time ${base_time} \
+ --cycle-time ${cycle_time} \
+ --num-frames ${num_pkts} \
+ --frame-size 64 \
+ --txtime \
+ --utc-tai-offset ${UTC_TAI_OFFSET} \
+ --cpu-mask $((1 << ${ISOCHRON_CPU})) \
+ --sched-fifo \
+ --sched-priority 98 \
+ --client 127.0.0.1 \
+ --sync-threshold 5000 \
+ --output-file ${isochron_dat} \
+ ${extra_args} \
+ --quiet
+
+ isochron_recv_stop
+
+ cpufreq_restore ${ISOCHRON_CPU}
+}
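+
+# Example invocation (sketch; interface names, socket paths and times are
+# illustrative, "" selects the defaults handled above):
+#   isochron_do eth0 eth1 /tmp/snd.sock /tmp/rcv.sock 0 10000000 "" 100 \
+#           "" 5 "" /tmp/isochron.dat
+# sends 100 L2 frames (no dst_ip given) with a 10 ms cycle time at
+# priority 5 and records per-packet timestamps in /tmp/isochron.dat.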
diff --git a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
index 0727e2012b68..43469c7de118 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_asymmetric.sh
@@ -525,7 +525,7 @@ arp_suppression()
log_test "neigh_suppress: on / neigh exists: yes"
- # Delete the neighbour from the the SVI. A single ARP request should be
+ # Delete the neighbour from the SVI. A single ARP request should be
# received by the remote VTEP
RET=0
diff --git a/tools/testing/selftests/net/io_uring_zerocopy_tx.c b/tools/testing/selftests/net/io_uring_zerocopy_tx.c
new file mode 100644
index 000000000000..9d64c560a2d6
--- /dev/null
+++ b/tools/testing/selftests/net/io_uring_zerocopy_tx.c
@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: MIT */
+/* based on linux-kernel/tools/testing/selftests/net/msg_zerocopy.c */
+#include <assert.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <arpa/inet.h>
+#include <linux/errqueue.h>
+#include <linux/if_packet.h>
+#include <linux/io_uring.h>
+#include <linux/ipv6.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/udp.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+
+#define NOTIF_TAG 0xfffffffULL
+#define NONZC_TAG 0
+#define ZC_TAG 1
+
+enum {
+ MODE_NONZC = 0,
+ MODE_ZC = 1,
+ MODE_ZC_FIXED = 2,
+ MODE_MIXED = 3,
+};
+
+static bool cfg_flush = false;
+static bool cfg_cork = false;
+static int cfg_mode = MODE_ZC_FIXED;
+static int cfg_nr_reqs = 8;
+static int cfg_family = PF_UNSPEC;
+static int cfg_payload_len;
+static int cfg_port = 8000;
+static int cfg_runtime_ms = 4200;
+
+static socklen_t cfg_alen;
+static struct sockaddr_storage cfg_dst_addr;
+
+static char payload[IP_MAXPACKET] __attribute__((aligned(4096)));
+
+struct io_sq_ring {
+ unsigned *head;
+ unsigned *tail;
+ unsigned *ring_mask;
+ unsigned *ring_entries;
+ unsigned *flags;
+ unsigned *array;
+};
+
+struct io_cq_ring {
+ unsigned *head;
+ unsigned *tail;
+ unsigned *ring_mask;
+ unsigned *ring_entries;
+ struct io_uring_cqe *cqes;
+};
+
+struct io_uring_sq {
+ unsigned *khead;
+ unsigned *ktail;
+ unsigned *kring_mask;
+ unsigned *kring_entries;
+ unsigned *kflags;
+ unsigned *kdropped;
+ unsigned *array;
+ struct io_uring_sqe *sqes;
+
+ unsigned sqe_head;
+ unsigned sqe_tail;
+
+ size_t ring_sz;
+};
+
+struct io_uring_cq {
+ unsigned *khead;
+ unsigned *ktail;
+ unsigned *kring_mask;
+ unsigned *kring_entries;
+ unsigned *koverflow;
+ struct io_uring_cqe *cqes;
+
+ size_t ring_sz;
+};
+
+struct io_uring {
+ struct io_uring_sq sq;
+ struct io_uring_cq cq;
+ int ring_fd;
+};
+
+#ifdef __alpha__
+# ifndef __NR_io_uring_setup
+# define __NR_io_uring_setup 535
+# endif
+# ifndef __NR_io_uring_enter
+# define __NR_io_uring_enter 536
+# endif
+# ifndef __NR_io_uring_register
+# define __NR_io_uring_register 537
+# endif
+#else /* !__alpha__ */
+# ifndef __NR_io_uring_setup
+# define __NR_io_uring_setup 425
+# endif
+# ifndef __NR_io_uring_enter
+# define __NR_io_uring_enter 426
+# endif
+# ifndef __NR_io_uring_register
+# define __NR_io_uring_register 427
+# endif
+#endif
+
+#if defined(__x86_64) || defined(__i386__)
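+/* x86 is strongly ordered, so a compiler barrier is sufficient here. */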
+#define read_barrier() __asm__ __volatile__("":::"memory")
+#define write_barrier() __asm__ __volatile__("":::"memory")
+#else
+
+#define read_barrier() __sync_synchronize()
+#define write_barrier() __sync_synchronize()
+#endif
+
+static int io_uring_setup(unsigned int entries, struct io_uring_params *p)
+{
+ return syscall(__NR_io_uring_setup, entries, p);
+}
+
+static int io_uring_enter(int fd, unsigned int to_submit,
+ unsigned int min_complete,
+ unsigned int flags, sigset_t *sig)
+{
+ return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
+ flags, sig, _NSIG / 8);
+}
+
+static int io_uring_register_buffers(struct io_uring *ring,
+ const struct iovec *iovecs,
+ unsigned nr_iovecs)
+{
+ int ret;
+
+ ret = syscall(__NR_io_uring_register, ring->ring_fd,
+ IORING_REGISTER_BUFFERS, iovecs, nr_iovecs);
+ return (ret < 0) ? -errno : ret;
+}
+
+static int io_uring_register_notifications(struct io_uring *ring,
+ unsigned nr,
+ struct io_uring_notification_slot *slots)
+{
+ int ret;
+ struct io_uring_notification_register r = {
+ .nr_slots = nr,
+ .data = (unsigned long)slots,
+ };
+
+ ret = syscall(__NR_io_uring_register, ring->ring_fd,
+ IORING_REGISTER_NOTIFIERS, &r, sizeof(r));
+ return (ret < 0) ? -errno : ret;
+}
+
+static int io_uring_mmap(int fd, struct io_uring_params *p,
+ struct io_uring_sq *sq, struct io_uring_cq *cq)
+{
+ size_t size;
+ void *ptr;
+ int ret;
+
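+	/* The SQ ring (head/tail/mask/flags plus the index array) is a single
+	 * shared mapping at IORING_OFF_SQ_RING; the SQEs themselves are
+	 * mapped separately at IORING_OFF_SQES below. */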
+ sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
+ ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
+ if (ptr == MAP_FAILED)
+ return -errno;
+ sq->khead = ptr + p->sq_off.head;
+ sq->ktail = ptr + p->sq_off.tail;
+ sq->kring_mask = ptr + p->sq_off.ring_mask;
+ sq->kring_entries = ptr + p->sq_off.ring_entries;
+ sq->kflags = ptr + p->sq_off.flags;
+ sq->kdropped = ptr + p->sq_off.dropped;
+ sq->array = ptr + p->sq_off.array;
+
+ size = p->sq_entries * sizeof(struct io_uring_sqe);
+ sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
+ if (sq->sqes == MAP_FAILED) {
+ ret = -errno;
+err:
+ munmap(sq->khead, sq->ring_sz);
+ return ret;
+ }
+
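+	/* The CQ ring, including the CQE array itself, is mapped at
+	 * IORING_OFF_CQ_RING. */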
+ cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
+ ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
+ if (ptr == MAP_FAILED) {
+ ret = -errno;
+ munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
+ goto err;
+ }
+ cq->khead = ptr + p->cq_off.head;
+ cq->ktail = ptr + p->cq_off.tail;
+ cq->kring_mask = ptr + p->cq_off.ring_mask;
+ cq->kring_entries = ptr + p->cq_off.ring_entries;
+ cq->koverflow = ptr + p->cq_off.overflow;
+ cq->cqes = ptr + p->cq_off.cqes;
+ return 0;
+}
+
+static int io_uring_queue_init(unsigned entries, struct io_uring *ring,
+ unsigned flags)
+{
+ struct io_uring_params p;
+ int fd, ret;
+
+ memset(ring, 0, sizeof(*ring));
+ memset(&p, 0, sizeof(p));
+ p.flags = flags;
+
+ fd = io_uring_setup(entries, &p);
+ if (fd < 0)
+ return fd;
+ ret = io_uring_mmap(fd, &p, &ring->sq, &ring->cq);
+ if (!ret)
+ ring->ring_fd = fd;
+ else
+ close(fd);
+ return ret;
+}
+
+static int io_uring_submit(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+ const unsigned mask = *sq->kring_mask;
+ unsigned ktail, submitted, to_submit;
+ int ret;
+
+ read_barrier();
+ if (*sq->khead != *sq->ktail) {
+ submitted = *sq->kring_entries;
+ goto submit;
+ }
+ if (sq->sqe_head == sq->sqe_tail)
+ return 0;
+
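+	/* Fill the shared index array with the new SQE slots, then publish
+	 * the tail; the write barriers keep the kernel from observing the
+	 * new tail before the array entries. */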
+ ktail = *sq->ktail;
+ to_submit = sq->sqe_tail - sq->sqe_head;
+ for (submitted = 0; submitted < to_submit; submitted++) {
+ read_barrier();
+ sq->array[ktail++ & mask] = sq->sqe_head++ & mask;
+ }
+ if (!submitted)
+ return 0;
+
+ if (*sq->ktail != ktail) {
+ write_barrier();
+ *sq->ktail = ktail;
+ write_barrier();
+ }
+submit:
+ ret = io_uring_enter(ring->ring_fd, submitted, 0,
+ IORING_ENTER_GETEVENTS, NULL);
+ return ret < 0 ? -errno : ret;
+}
+
+static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
+ const void *buf, size_t len, int flags)
+{
+ memset(sqe, 0, sizeof(*sqe));
+ sqe->opcode = (__u8) IORING_OP_SEND;
+ sqe->fd = sockfd;
+ sqe->addr = (unsigned long) buf;
+ sqe->len = len;
+ sqe->msg_flags = (__u32) flags;
+}
+
+static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd,
+ const void *buf, size_t len, int flags,
+ unsigned slot_idx, unsigned zc_flags)
+{
+ io_uring_prep_send(sqe, sockfd, buf, len, flags);
+ sqe->opcode = (__u8) IORING_OP_SENDZC_NOTIF;
+ sqe->notification_idx = slot_idx;
+ sqe->ioprio = zc_flags;
+}
+
+static struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+{
+ struct io_uring_sq *sq = &ring->sq;
+
+ if (sq->sqe_tail + 1 - sq->sqe_head > *sq->kring_entries)
+ return NULL;
+ return &sq->sqes[sq->sqe_tail++ & *sq->kring_mask];
+}
+
+static int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
+{
+ struct io_uring_cq *cq = &ring->cq;
+ const unsigned mask = *cq->kring_mask;
+ unsigned head = *cq->khead;
+ int ret;
+
+ *cqe_ptr = NULL;
+ do {
+ read_barrier();
+ if (head != *cq->ktail) {
+ *cqe_ptr = &cq->cqes[head & mask];
+ break;
+ }
+ ret = io_uring_enter(ring->ring_fd, 0, 1,
+ IORING_ENTER_GETEVENTS, NULL);
+ if (ret < 0)
+ return -errno;
+ } while (1);
+
+ return 0;
+}
+
+static inline void io_uring_cqe_seen(struct io_uring *ring)
+{
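+	/* Consume one CQE by advancing the shared CQ head. */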
+ *(&ring->cq)->khead += 1;
+ write_barrier();
+}
+
+static unsigned long gettimeofday_ms(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+}
+
+static void do_setsockopt(int fd, int level, int optname, int val)
+{
+ if (setsockopt(fd, level, optname, &val, sizeof(val)))
+ error(1, errno, "setsockopt %d.%d: %d", level, optname, val);
+}
+
+static int do_setup_tx(int domain, int type, int protocol)
+{
+ int fd;
+
+ fd = socket(domain, type, protocol);
+ if (fd == -1)
+ error(1, errno, "socket t");
+
+ do_setsockopt(fd, SOL_SOCKET, SO_SNDBUF, 1 << 21);
+
+ if (connect(fd, (void *) &cfg_dst_addr, cfg_alen))
+ error(1, errno, "connect");
+ return fd;
+}
+
+static void do_tx(int domain, int type, int protocol)
+{
+ struct io_uring_notification_slot b[1] = {{.tag = NOTIF_TAG}};
+ struct io_uring_sqe *sqe;
+ struct io_uring_cqe *cqe;
+ unsigned long packets = 0, bytes = 0;
+ struct io_uring ring;
+ struct iovec iov;
+ uint64_t tstop;
+ int i, fd, ret;
+ int compl_cqes = 0;
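+	/* number of notification CQEs (NOTIF_TAG) still expected; only
+	 * zerocopy sends flagged with IORING_RECVSEND_NOTIF_FLUSH post one */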
+
+ fd = do_setup_tx(domain, type, protocol);
+
+ ret = io_uring_queue_init(512, &ring, 0);
+ if (ret)
+ error(1, ret, "io_uring: queue init");
+
+ ret = io_uring_register_notifications(&ring, 1, b);
+ if (ret)
+ error(1, ret, "io_uring: tx ctx registration");
+
+ iov.iov_base = payload;
+ iov.iov_len = cfg_payload_len;
+
+ ret = io_uring_register_buffers(&ring, &iov, 1);
+ if (ret)
+ error(1, ret, "io_uring: buffer registration");
+
+ tstop = gettimeofday_ms() + cfg_runtime_ms;
+ do {
+ if (cfg_cork)
+ do_setsockopt(fd, IPPROTO_UDP, UDP_CORK, 1);
+
+ for (i = 0; i < cfg_nr_reqs; i++) {
+ unsigned zc_flags = 0;
+ unsigned buf_idx = 0;
+ unsigned slot_idx = 0;
+ unsigned mode = cfg_mode;
+ unsigned msg_flags = 0;
+
+ if (cfg_mode == MODE_MIXED)
+ mode = rand() % 3;
+
+ sqe = io_uring_get_sqe(&ring);
+
+ if (mode == MODE_NONZC) {
+ io_uring_prep_send(sqe, fd, payload,
+ cfg_payload_len, msg_flags);
+ sqe->user_data = NONZC_TAG;
+ } else {
+ if (cfg_flush) {
+ zc_flags |= IORING_RECVSEND_NOTIF_FLUSH;
+ compl_cqes++;
+ }
+ io_uring_prep_sendzc(sqe, fd, payload,
+ cfg_payload_len,
+ msg_flags, slot_idx, zc_flags);
+ if (mode == MODE_ZC_FIXED) {
+ sqe->ioprio |= IORING_RECVSEND_FIXED_BUF;
+ sqe->buf_index = buf_idx;
+ }
+ sqe->user_data = ZC_TAG;
+ }
+ }
+
+ ret = io_uring_submit(&ring);
+ if (ret != cfg_nr_reqs)
+ error(1, ret, "submit");
+
+ for (i = 0; i < cfg_nr_reqs; i++) {
+ ret = io_uring_wait_cqe(&ring, &cqe);
+ if (ret)
+ error(1, ret, "wait cqe");
+
+ if (cqe->user_data == NOTIF_TAG) {
+ compl_cqes--;
+ i--;
+ } else if (cqe->user_data != NONZC_TAG &&
+ cqe->user_data != ZC_TAG) {
+ error(1, cqe->res, "invalid user_data");
+ } else if (cqe->res <= 0 && cqe->res != -EAGAIN) {
+ error(1, cqe->res, "send failed");
+ } else {
+ if (cqe->res > 0) {
+ packets++;
+ bytes += cqe->res;
+ }
+ /* failed requests don't flush */
+ if (cfg_flush &&
+ cqe->res <= 0 &&
+ cqe->user_data == ZC_TAG)
+ compl_cqes--;
+ }
+ io_uring_cqe_seen(&ring);
+ }
+ if (cfg_cork)
+ do_setsockopt(fd, IPPROTO_UDP, UDP_CORK, 0);
+ } while (gettimeofday_ms() < tstop);
+
+ if (close(fd))
+ error(1, errno, "close");
+
+ fprintf(stderr, "tx=%lu (MB=%lu), tx/s=%lu (MB/s=%lu)\n",
+ packets, bytes >> 20,
+ packets / (cfg_runtime_ms / 1000),
+ (bytes >> 20) / (cfg_runtime_ms / 1000));
+
+ while (compl_cqes) {
+ ret = io_uring_wait_cqe(&ring, &cqe);
+ if (ret)
+ error(1, ret, "wait cqe");
+ io_uring_cqe_seen(&ring);
+ compl_cqes--;
+ }
+}
+
+static void do_test(int domain, int type, int protocol)
+{
+ int i;
+
+ for (i = 0; i < IP_MAXPACKET; i++)
+ payload[i] = 'a' + (i % 26);
+ do_tx(domain, type, protocol);
+}
+
+static void usage(const char *filepath)
+{
+ error(1, 0, "Usage: %s [-f] [-n<N>] [-z0] [-s<payload size>] "
+ "(-4|-6) [-t<time s>] -D<dst_ip> udp", filepath);
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ const int max_payload_len = sizeof(payload) -
+ sizeof(struct ipv6hdr) -
+ sizeof(struct tcphdr) -
+ 40 /* max tcp options */;
+ struct sockaddr_in6 *addr6 = (void *) &cfg_dst_addr;
+ struct sockaddr_in *addr4 = (void *) &cfg_dst_addr;
+ char *daddr = NULL;
+ int c;
+
+ if (argc <= 1)
+ usage(argv[0]);
+ cfg_payload_len = max_payload_len;
+
+ while ((c = getopt(argc, argv, "46D:p:s:t:n:fc:m:")) != -1) {
+ switch (c) {
+ case '4':
+ if (cfg_family != PF_UNSPEC)
+ error(1, 0, "Pass one of -4 or -6");
+ cfg_family = PF_INET;
+ cfg_alen = sizeof(struct sockaddr_in);
+ break;
+ case '6':
+ if (cfg_family != PF_UNSPEC)
+ error(1, 0, "Pass one of -4 or -6");
+ cfg_family = PF_INET6;
+ cfg_alen = sizeof(struct sockaddr_in6);
+ break;
+ case 'D':
+ daddr = optarg;
+ break;
+ case 'p':
+ cfg_port = strtoul(optarg, NULL, 0);
+ break;
+ case 's':
+ cfg_payload_len = strtoul(optarg, NULL, 0);
+ break;
+ case 't':
+ cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
+ break;
+ case 'n':
+ cfg_nr_reqs = strtoul(optarg, NULL, 0);
+ break;
+ case 'f':
+ cfg_flush = 1;
+ break;
+ case 'c':
+ cfg_cork = strtol(optarg, NULL, 0);
+ break;
+ case 'm':
+ cfg_mode = strtol(optarg, NULL, 0);
+ break;
+ }
+ }
+
+ switch (cfg_family) {
+ case PF_INET:
+ memset(addr4, 0, sizeof(*addr4));
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = htons(cfg_port);
+ if (daddr &&
+ inet_pton(AF_INET, daddr, &(addr4->sin_addr)) != 1)
+ error(1, 0, "ipv4 parse error: %s", daddr);
+ break;
+ case PF_INET6:
+ memset(addr6, 0, sizeof(*addr6));
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = htons(cfg_port);
+ if (daddr &&
+ inet_pton(AF_INET6, daddr, &(addr6->sin6_addr)) != 1)
+ error(1, 0, "ipv6 parse error: %s", daddr);
+ break;
+ default:
+ error(1, 0, "illegal domain");
+ }
+
+ if (cfg_payload_len > max_payload_len)
+ error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
+ if (cfg_mode == MODE_NONZC && cfg_flush)
+ error(1, 0, "-f: only zerocopy modes support notifications");
+ if (optind != argc - 1)
+ usage(argv[0]);
+}
+
+int main(int argc, char **argv)
+{
+ const char *cfg_test = argv[argc - 1];
+
+ parse_opts(argc, argv);
+
+ if (!strcmp(cfg_test, "tcp"))
+ do_test(cfg_family, SOCK_STREAM, 0);
+ else if (!strcmp(cfg_test, "udp"))
+ do_test(cfg_family, SOCK_DGRAM, 0);
+ else
+ error(1, 0, "unknown cfg_test %s", cfg_test);
+ return 0;
+}
diff --git a/tools/testing/selftests/net/io_uring_zerocopy_tx.sh b/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
new file mode 100755
index 000000000000..6a65e4437640
--- /dev/null
+++ b/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+#
+# Send data between two processes across namespaces
+# Run twice: once without and once with zerocopy
+
+set -e
+
+readonly DEV="veth0"
+readonly DEV_MTU=65535
+readonly BIN_TX="./io_uring_zerocopy_tx"
+readonly BIN_RX="./msg_zerocopy"
+
+readonly RAND="$(mktemp -u XXXXXX)"
+readonly NSPREFIX="ns-${RAND}"
+readonly NS1="${NSPREFIX}1"
+readonly NS2="${NSPREFIX}2"
+
+readonly SADDR4='192.168.1.1'
+readonly DADDR4='192.168.1.2'
+readonly SADDR6='fd::1'
+readonly DADDR6='fd::2'
+
+readonly path_sysctl_mem="net.core.optmem_max"
+
+# No arguments: automated test
+if [[ "$#" -eq "0" ]]; then
+ IPs=( "4" "6" )
+ protocols=( "tcp" "udp" )
+
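+	# Modes map to the C test's cfg_mode: 1 = zerocopy, 2 = zerocopy with
+	# registered (fixed) buffers, 3 = mixed (mode picked per request).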
+ for IP in "${IPs[@]}"; do
+ for proto in "${protocols[@]}"; do
+ for mode in $(seq 1 3); do
+ $0 "$IP" "$proto" -m "$mode" -t 1 -n 32
+ $0 "$IP" "$proto" -m "$mode" -t 1 -n 32 -f
+ $0 "$IP" "$proto" -m "$mode" -t 1 -n 32 -c -f
+ done
+ done
+ done
+
+ echo "OK. All tests passed"
+ exit 0
+fi
+
+# Argument parsing
+if [[ "$#" -lt "2" ]]; then
+ echo "Usage: $0 [4|6] [tcp|udp|raw|raw_hdrincl|packet|packet_dgram] <args>"
+ exit 1
+fi
+
+readonly IP="$1"
+shift
+readonly TXMODE="$1"
+shift
+readonly EXTRA_ARGS="$@"
+
+# Argument parsing: configure addresses
+if [[ "${IP}" == "4" ]]; then
+ readonly SADDR="${SADDR4}"
+ readonly DADDR="${DADDR4}"
+elif [[ "${IP}" == "6" ]]; then
+ readonly SADDR="${SADDR6}"
+ readonly DADDR="${DADDR6}"
+else
+ echo "Invalid IP version ${IP}"
+ exit 1
+fi
+
+# Argument parsing: select receive mode
+#
+# This differs from send mode for
+# - packet: use raw recv, because packet receives skb clones
+# - raw_hdrincl: use raw recv, because hdrincl is a tx-only option
+case "${TXMODE}" in
+'packet' | 'packet_dgram' | 'raw_hdrincl')
+ RXMODE='raw'
+ ;;
+*)
+ RXMODE="${TXMODE}"
+ ;;
+esac
+
+# Start of state changes: install cleanup handler
+save_sysctl_mem="$(sysctl -n ${path_sysctl_mem})"
+
+cleanup() {
+ ip netns del "${NS2}"
+ ip netns del "${NS1}"
+ sysctl -w -q "${path_sysctl_mem}=${save_sysctl_mem}"
+}
+
+trap cleanup EXIT
+
+# Configure system settings
+sysctl -w -q "${path_sysctl_mem}=1000000"
+
+# Create virtual ethernet pair between network namespaces
+ip netns add "${NS1}"
+ip netns add "${NS2}"
+
+ip link add "${DEV}" mtu "${DEV_MTU}" netns "${NS1}" type veth \
+ peer name "${DEV}" mtu "${DEV_MTU}" netns "${NS2}"
+
+# Bring the devices up
+ip -netns "${NS1}" link set "${DEV}" up
+ip -netns "${NS2}" link set "${DEV}" up
+
+# Set fixed MAC addresses on the devices
+ip -netns "${NS1}" link set dev "${DEV}" address 02:02:02:02:02:02
+ip -netns "${NS2}" link set dev "${DEV}" address 06:06:06:06:06:06
+
+# Add fixed IP addresses to the devices
+ip -netns "${NS1}" addr add 192.168.1.1/24 dev "${DEV}"
+ip -netns "${NS2}" addr add 192.168.1.2/24 dev "${DEV}"
+ip -netns "${NS1}" addr add fd::1/64 dev "${DEV}" nodad
+ip -netns "${NS2}" addr add fd::2/64 dev "${DEV}" nodad
+
+# Optionally disable sg or csum offload to test edge cases
+# ip netns exec "${NS1}" ethtool -K "${DEV}" sg off
+
+do_test() {
+	local -r ARGS="$1"
+
+ echo "ipv${IP} ${TXMODE} ${ARGS}"
+ ip netns exec "${NS2}" "${BIN_RX}" "-${IP}" -t 2 -C 2 -S "${SADDR}" -D "${DADDR}" -r "${RXMODE}" &
+ sleep 0.2
+ ip netns exec "${NS1}" "${BIN_TX}" "-${IP}" -t 1 -D "${DADDR}" ${ARGS} "${TXMODE}"
+ wait
+}
+
+do_test "${EXTRA_ARGS}"
+echo ok
diff --git a/tools/testing/selftests/net/ioam6.sh b/tools/testing/selftests/net/ioam6.sh
index a2b9fad5a9a6..4ceb401da1bf 100755
--- a/tools/testing/selftests/net/ioam6.sh
+++ b/tools/testing/selftests/net/ioam6.sh
@@ -117,6 +117,8 @@
# | Schema Data | |
# +-----------------------------------------------------------+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
################################################################################
# #
@@ -211,7 +213,7 @@ check_kernel_compatibility()
echo "SKIP: kernel version probably too old, missing ioam support"
ip link del veth0 2>/dev/null || true
ip netns del ioam-tmp-node || true
- exit 1
+ exit $ksft_skip
fi
ip -netns ioam-tmp-node route add db02::/64 encap ioam6 mode inline \
@@ -227,7 +229,7 @@ check_kernel_compatibility()
"without CONFIG_IPV6_IOAM6_LWTUNNEL?"
ip link del veth0 2>/dev/null || true
ip netns del ioam-tmp-node || true
- exit 1
+ exit $ksft_skip
fi
ip link del veth0 2>/dev/null || true
@@ -752,20 +754,20 @@ nfailed=0
if [ "$(id -u)" -ne 0 ]
then
echo "SKIP: Need root privileges"
- exit 1
+ exit $ksft_skip
fi
if [ ! -x "$(command -v ip)" ]
then
echo "SKIP: Could not run test without ip tool"
- exit 1
+ exit $ksft_skip
fi
ip ioam &>/dev/null
if [ $? = 1 ]
then
echo "SKIP: iproute2 too old, missing ioam command"
- exit 1
+ exit $ksft_skip
fi
check_kernel_compatibility
diff --git a/tools/testing/selftests/net/ipv6_flowlabel.c b/tools/testing/selftests/net/ipv6_flowlabel.c
index a7c41375374f..708a9822259d 100644
--- a/tools/testing/selftests/net/ipv6_flowlabel.c
+++ b/tools/testing/selftests/net/ipv6_flowlabel.c
@@ -9,6 +9,7 @@
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
+#include <linux/icmpv6.h>
#include <linux/in6.h>
#include <stdbool.h>
#include <stdio.h>
@@ -29,26 +30,48 @@
#ifndef IPV6_FLOWLABEL_MGR
#define IPV6_FLOWLABEL_MGR 32
#endif
+#ifndef IPV6_FLOWINFO_SEND
+#define IPV6_FLOWINFO_SEND 33
+#endif
#define FLOWLABEL_WILDCARD ((uint32_t) -1)
static const char cfg_data[] = "a";
static uint32_t cfg_label = 1;
+static bool use_ping;
+static bool use_flowinfo_send;
+
+static struct icmp6hdr icmp6 = {
+ .icmp6_type = ICMPV6_ECHO_REQUEST
+};
+
+static struct sockaddr_in6 addr = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_LOOPBACK_INIT,
+};
static void do_send(int fd, bool with_flowlabel, uint32_t flowlabel)
{
char control[CMSG_SPACE(sizeof(flowlabel))] = {0};
struct msghdr msg = {0};
- struct iovec iov = {0};
+ struct iovec iov = {
+ .iov_base = (char *)cfg_data,
+ .iov_len = sizeof(cfg_data)
+ };
int ret;
- iov.iov_base = (char *)cfg_data;
- iov.iov_len = sizeof(cfg_data);
+ if (use_ping) {
+ iov.iov_base = &icmp6;
+ iov.iov_len = sizeof(icmp6);
+ }
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
- if (with_flowlabel) {
+ if (use_flowinfo_send) {
+ msg.msg_name = &addr;
+ msg.msg_namelen = sizeof(addr);
+ } else if (with_flowlabel) {
struct cmsghdr *cm;
cm = (void *)control;
@@ -94,6 +117,8 @@ static void do_recv(int fd, bool with_flowlabel, uint32_t expect)
ret = recvmsg(fd, &msg, 0);
if (ret == -1)
error(1, errno, "recv");
+ if (use_ping)
+ goto parse_cmsg;
if (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))
error(1, 0, "recv: truncated");
if (ret != sizeof(cfg_data))
@@ -101,6 +126,7 @@ static void do_recv(int fd, bool with_flowlabel, uint32_t expect)
if (memcmp(data, cfg_data, sizeof(data)))
error(1, 0, "recv: data mismatch");
+parse_cmsg:
cm = CMSG_FIRSTHDR(&msg);
if (with_flowlabel) {
if (!cm)
@@ -114,9 +140,11 @@ static void do_recv(int fd, bool with_flowlabel, uint32_t expect)
flowlabel = ntohl(*(uint32_t *)CMSG_DATA(cm));
fprintf(stderr, "recv with label %u\n", flowlabel);
- if (expect != FLOWLABEL_WILDCARD && expect != flowlabel)
+ if (expect != FLOWLABEL_WILDCARD && expect != flowlabel) {
fprintf(stderr, "recv: incorrect flowlabel %u != %u\n",
flowlabel, expect);
+ error(1, 0, "recv: flowlabel is wrong");
+ }
} else {
fprintf(stderr, "recv without label\n");
@@ -165,11 +193,17 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "l:")) != -1) {
+ while ((c = getopt(argc, argv, "l:ps")) != -1) {
switch (c) {
case 'l':
cfg_label = strtoul(optarg, NULL, 0);
break;
+ case 'p':
+ use_ping = true;
+ break;
+ case 's':
+ use_flowinfo_send = true;
+ break;
default:
error(1, 0, "%s: parse error", argv[0]);
}
@@ -178,27 +212,30 @@ static void parse_opts(int argc, char **argv)
int main(int argc, char **argv)
{
- struct sockaddr_in6 addr = {
- .sin6_family = AF_INET6,
- .sin6_port = htons(8000),
- .sin6_addr = IN6ADDR_LOOPBACK_INIT,
- };
const int one = 1;
int fdt, fdr;
+ int prot = 0;
+
+ addr.sin6_port = htons(8000);
parse_opts(argc, argv);
- fdt = socket(PF_INET6, SOCK_DGRAM, 0);
+ if (use_ping) {
+ fprintf(stderr, "attempting to use ping sockets\n");
+ prot = IPPROTO_ICMPV6;
+ }
+
+ fdt = socket(PF_INET6, SOCK_DGRAM, prot);
if (fdt == -1)
error(1, errno, "socket t");
- fdr = socket(PF_INET6, SOCK_DGRAM, 0);
+ fdr = use_ping ? fdt : socket(PF_INET6, SOCK_DGRAM, 0);
if (fdr == -1)
error(1, errno, "socket r");
if (connect(fdt, (void *)&addr, sizeof(addr)))
error(1, errno, "connect");
- if (bind(fdr, (void *)&addr, sizeof(addr)))
+ if (!use_ping && bind(fdr, (void *)&addr, sizeof(addr)))
error(1, errno, "bind");
flowlabel_get(fdt, cfg_label, IPV6_FL_S_EXCL, IPV6_FL_F_CREATE);
@@ -216,13 +253,21 @@ int main(int argc, char **argv)
do_recv(fdr, false, 0);
}
+ if (use_flowinfo_send) {
+ fprintf(stderr, "using IPV6_FLOWINFO_SEND to send label\n");
+ addr.sin6_flowinfo = htonl(cfg_label);
+ if (setsockopt(fdt, SOL_IPV6, IPV6_FLOWINFO_SEND, &one,
+ sizeof(one)) == -1)
+ error(1, errno, "setsockopt flowinfo_send");
+ }
+
fprintf(stderr, "send label\n");
do_send(fdt, true, cfg_label);
do_recv(fdr, true, cfg_label);
if (close(fdr))
error(1, errno, "close r");
- if (close(fdt))
+ if (!use_ping && close(fdt))
error(1, errno, "close t");
return 0;
diff --git a/tools/testing/selftests/net/ipv6_flowlabel.sh b/tools/testing/selftests/net/ipv6_flowlabel.sh
index d3bc6442704e..cee95e252bee 100755
--- a/tools/testing/selftests/net/ipv6_flowlabel.sh
+++ b/tools/testing/selftests/net/ipv6_flowlabel.sh
@@ -18,4 +18,20 @@ echo "TEST datapath (with auto-flowlabels)"
./in_netns.sh \
sh -c 'sysctl -q -w net.ipv6.auto_flowlabels=1 && ./ipv6_flowlabel -l 1'
+echo "TEST datapath (with ping-sockets)"
+./in_netns.sh \
+ sh -c 'sysctl -q -w net.ipv6.flowlabel_reflect=4 && \
+ sysctl -q -w net.ipv4.ping_group_range="0 2147483647" && \
+ ./ipv6_flowlabel -l 1 -p'
+
+echo "TEST datapath (with flowinfo-send)"
+./in_netns.sh \
+ sh -c './ipv6_flowlabel -l 1 -s'
+
+echo "TEST datapath (with ping-sockets flowinfo-send)"
+./in_netns.sh \
+ sh -c 'sysctl -q -w net.ipv6.flowlabel_reflect=4 && \
+ sysctl -q -w net.ipv4.ping_group_range="0 2147483647" && \
+ ./ipv6_flowlabel -l 1 -p -s'
+
echo OK. All tests passed
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index f905d5358e68..43a723626126 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -1,12 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
top_srcdir = ../../../../..
-KSFT_KHDR_INSTALL := 1
CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)
TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
- simult_flows.sh mptcp_sockopt.sh
+ simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index d36b7da5082a..38021a0dd527 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -12,6 +12,9 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NF_TABLES_INET=y
CONFIG_NFT_TPROXY=m
CONFIG_NFT_SOCKET=m
@@ -19,3 +22,8 @@ CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_SCH_INGRESS=m
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index ff821025d309..515859a5168b 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -61,6 +61,39 @@ chk_msk_nr()
__chk_nr "grep -c token:" $*
}
+wait_msk_nr()
+{
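+	# Poll "ss" once per second, up to $timeout times, until the expected
+	# number of MPTCP tokens appears; track the maximum seen so the
+	# failure message can report it.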
+ local condition="grep -c token:"
+ local expected=$1
+ local timeout=20
+ local msg nr
+ local max=0
+ local i=0
+
+ shift 1
+ msg=$*
+
+ while [ $i -lt $timeout ]; do
+ nr=$(ss -inmHMN $ns | $condition)
+ [ $nr == $expected ] && break;
+ [ $nr -gt $max ] && max=$nr
+ i=$((i + 1))
+ sleep 1
+ done
+
+ printf "%-50s" "$msg"
+ if [ $i -ge $timeout ]; then
+ echo "[ fail ] timeout while expecting $expected max $max last $nr"
+ ret=$test_cnt
+ elif [ $nr != $expected ]; then
+ echo "[ fail ] expected $expected found $nr"
+ ret=$test_cnt
+ else
+ echo "[ ok ]"
+ fi
+ test_cnt=$((test_cnt+1))
+}
+
chk_msk_fallback_nr()
{
__chk_nr "grep -c fallback" $*
@@ -71,6 +104,43 @@ chk_msk_remote_key_nr()
__chk_nr "grep -c remote_key" $*
}
+__chk_listen()
+{
+ local filter="$1"
+ local expected=$2
+
+ shift 2
+ msg=$*
+
+ nr=$(ss -N $ns -Ml "$filter" | grep -c LISTEN)
+ printf "%-50s" "$msg"
+
+ if [ $nr != $expected ]; then
+ echo "[ fail ] expected $expected found $nr"
+ ret=$test_cnt
+ else
+ echo "[ ok ]"
+ fi
+}
+
+chk_msk_listen()
+{
+ lport=$1
+ local msg="check for listen socket"
+
+ # destination port search should always return empty list
+ __chk_listen "dport $lport" 0 "listen match for dport $lport"
+
+ # should return 'our' mptcp listen socket
+ __chk_listen "sport $lport" 1 "listen match for sport $lport"
+
+ __chk_listen "src inet:0.0.0.0:$lport" 1 "listen match for saddr and sport"
+
+ __chk_listen "" 1 "all listen sockets"
+}
+
# $1: ns, $2: port
wait_local_port_listen()
{
@@ -109,15 +179,16 @@ ip -n $ns link set dev lo up
echo "a" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10000 -l -t ${timeout_poll} \
+ ./mptcp_connect -p 10000 -l -t ${timeout_poll} -w 20 \
0.0.0.0 >/dev/null &
wait_local_port_listen $ns 10000
chk_msk_nr 0 "no msk on netns creation"
+chk_msk_listen 10000
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
+ ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} -w 20 \
127.0.0.1 >/dev/null &
wait_connected $ns 10000
chk_msk_nr 2 "after MPC handshake "
@@ -129,13 +200,13 @@ flush_pids
echo "a" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
+ ./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} -w 20 \
0.0.0.0 >/dev/null &
wait_local_port_listen $ns 10001
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
+ ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} -w 20 \
127.0.0.1 >/dev/null &
wait_connected $ns 10001
chk_msk_fallback_nr 1 "check fallback"
@@ -146,7 +217,7 @@ for I in `seq 1 $NR_CLIENTS`; do
echo "a" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p $((I+10001)) -l -w 10 \
+ ./mptcp_connect -p $((I+10001)) -l -w 20 \
-t ${timeout_poll} 0.0.0.0 >/dev/null &
done
wait_local_port_listen $ns $((NR_CLIENTS + 10001))
@@ -155,12 +226,11 @@ for I in `seq 1 $NR_CLIENTS`; do
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p $((I+10001)) -w 10 \
+ ./mptcp_connect -p $((I+10001)) -w 20 \
-t ${timeout_poll} 127.0.0.1 >/dev/null &
done
-sleep 1.5
-chk_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
+wait_msk_nr $((NR_CLIENTS*2)) "many msk sockets present"
flush_pids
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 8628aa61b763..e2ea6c126c99 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -265,7 +265,7 @@ static void sock_test_tcpulp(int sock, int proto, unsigned int line)
static int sock_listen_mptcp(const char * const listenaddr,
const char * const port)
{
- int sock;
+ int sock = -1;
struct addrinfo hints = {
.ai_protocol = IPPROTO_TCP,
.ai_socktype = SOCK_STREAM,
diff --git a/tools/testing/selftests/net/mptcp/mptcp_inq.c b/tools/testing/selftests/net/mptcp/mptcp_inq.c
index 29f75e2a1116..8672d898f8cd 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_inq.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_inq.c
@@ -88,7 +88,7 @@ static void xgetaddrinfo(const char *node, const char *service,
static int sock_listen_mptcp(const char * const listenaddr,
const char * const port)
{
- int sock;
+ int sock = -1;
struct addrinfo hints = {
.ai_protocol = IPPROTO_TCP,
.ai_socktype = SOCK_STREAM,
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 48ef112f42c2..ff83ef426df5 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -70,6 +70,7 @@ init_partial()
ip netns add $netns || exit $ksft_skip
ip -net $netns link set lo up
ip netns exec $netns sysctl -q net.mptcp.enabled=1
+ ip netns exec $netns sysctl -q net.mptcp.pm_type=0
ip netns exec $netns sysctl -q net.ipv4.conf.all.rp_filter=0
ip netns exec $netns sysctl -q net.ipv4.conf.default.rp_filter=0
if [ $checksum -eq 1 ]; then
@@ -266,6 +267,58 @@ reset_with_allow_join_id0()
ip netns exec $ns2 sysctl -q net.mptcp.allow_join_initial_addr_port=$ns2_enable
}
+# Modify TCP payload without corrupting the TCP packet
+#
+# This rule inverts an 8-bit word at byte offset 148 in the 2nd TCP ACK
+# packet carrying enough data.
+# Once that is done, the TCP checksum field is updated so the packet is
+# still considered valid at the TCP level.
+# Because the MPTCP checksum, covering the TCP options and data, has not
+# been updated, the modification will be detected and an MP_FAIL will be
+# emitted, which is what we want to validate here without corrupting
+# "random" MPTCP options.
+#
+# To avoid having tc produce this pr_info() message for each TCP ACK packet
+# not carrying enough data:
+#
+# tc action pedit offset 162 out of bounds
+#
+# Netfilter is used to mark packets with enough data.
+reset_with_fail()
+{
+ reset "${1}" || return 1
+
+ ip netns exec $ns1 sysctl -q net.mptcp.checksum_enabled=1
+ ip netns exec $ns2 sysctl -q net.mptcp.checksum_enabled=1
+
+ check_invert=1
+ validate_checksum=1
+ local i="$2"
+ local ip="${3:-4}"
+ local tables
+
+ tables="iptables"
+ if [ $ip -eq 6 ]; then
+ tables="ip6tables"
+ fi
+
+ ip netns exec $ns2 $tables \
+ -t mangle \
+ -A OUTPUT \
+ -o ns2eth$i \
+ -p tcp \
+ -m length --length 150:9999 \
+ -m statistic --mode nth --packet 1 --every 99999 \
+ -j MARK --set-mark 42 || exit 1
+
+ tc -n $ns2 qdisc add dev ns2eth$i clsact || exit 1
+ tc -n $ns2 filter add dev ns2eth$i egress \
+ protocol ip prio 1000 \
+ handle 42 fw \
+ action pedit munge offset 148 u8 invert \
+ pipe csum tcp \
+ index 100 || exit 1
+}
+
fail_test()
{
ret=1
@@ -402,6 +455,12 @@ wait_mpj()
done
}
+kill_wait()
+{
+ kill $1 > /dev/null 2>&1
+ wait $1 2>/dev/null
+}
+
pm_nl_set_limits()
{
local ns=$1
@@ -601,6 +660,11 @@ do_transfer()
local port=$((10000 + TEST_COUNT - 1))
local cappid
+ local userspace_pm=0
+ local evts_ns1
+ local evts_ns1_pid
+ local evts_ns2
+ local evts_ns2_pid
:> "$cout"
:> "$sout"
@@ -637,10 +701,29 @@ do_transfer()
extra_args="-r ${speed:6}"
fi
+ if [[ "${addr_nr_ns1}" = "userspace_"* ]]; then
+ userspace_pm=1
+ addr_nr_ns1=${addr_nr_ns1:10}
+ fi
+
if [[ "${addr_nr_ns2}" = "fastclose_"* ]]; then
# disconnect
extra_args="$extra_args -I ${addr_nr_ns2:10}"
addr_nr_ns2=0
+ elif [[ "${addr_nr_ns2}" = "userspace_"* ]]; then
+ userspace_pm=1
+ addr_nr_ns2=${addr_nr_ns2:10}
+ fi
+
+ if [ $userspace_pm -eq 1 ]; then
+ evts_ns1=$(mktemp)
+ evts_ns2=$(mktemp)
+ :> "$evts_ns1"
+ :> "$evts_ns2"
+ ip netns exec ${listener_ns} ./pm_nl_ctl events >> "$evts_ns1" 2>&1 &
+ evts_ns1_pid=$!
+ ip netns exec ${connector_ns} ./pm_nl_ctl events >> "$evts_ns2" 2>&1 &
+ evts_ns2_pid=$!
fi
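+
+	# The monitors above append PM events as "type:<nr>,token:...,..."
+	# lines; the userspace PM branches below extract tokens, addresses
+	# and ports from these files with sed.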
local local_addr
@@ -695,6 +778,8 @@ do_transfer()
if [ $addr_nr_ns1 -gt 0 ]; then
local counter=2
local add_nr_ns1=${addr_nr_ns1}
+ local id=10
+ local tk
while [ $add_nr_ns1 -gt 0 ]; do
local addr
if is_v6 "${connect_addr}"; then
@@ -702,9 +787,18 @@ do_transfer()
else
addr="10.0.$counter.1"
fi
- pm_nl_add_endpoint $ns1 $addr flags signal
+ if [ $userspace_pm -eq 0 ]; then
+ pm_nl_add_endpoint $ns1 $addr flags signal
+ else
+ tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns1")
+ ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id
+ sleep 1
+ ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
+ fi
+
counter=$((counter + 1))
add_nr_ns1=$((add_nr_ns1 - 1))
+ id=$((id + 1))
done
elif [ $addr_nr_ns1 -lt 0 ]; then
local rm_nr_ns1=$((-addr_nr_ns1))
@@ -751,6 +845,8 @@ do_transfer()
if [ $addr_nr_ns2 -gt 0 ]; then
local add_nr_ns2=${addr_nr_ns2}
local counter=3
+ local id=20
+ local tk da dp sp
while [ $add_nr_ns2 -gt 0 ]; do
local addr
if is_v6 "${connect_addr}"; then
@@ -758,9 +854,23 @@ do_transfer()
else
addr="10.0.$counter.2"
fi
- pm_nl_add_endpoint $ns2 $addr flags $flags
+ if [ $userspace_pm -eq 0 ]; then
+ pm_nl_add_endpoint $ns2 $addr flags $flags
+ else
+ tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
+ dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
+ ip netns exec ${connector_ns} ./pm_nl_ctl csf lip $addr lid $id \
+ rip $da rport $dp token $tk
+ sleep 1
+ sp=$(grep "type:10" "$evts_ns2" |
+ sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+ ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \
+ rip $da rport $dp token $tk
+ fi
counter=$((counter + 1))
add_nr_ns2=$((add_nr_ns2 - 1))
+ id=$((id + 1))
done
elif [ $addr_nr_ns2 -lt 0 ]; then
local rm_nr_ns2=$((-addr_nr_ns2))
@@ -837,6 +947,12 @@ do_transfer()
kill $cappid
fi
+ if [ $userspace_pm -eq 1 ]; then
+ kill_wait $evts_ns1_pid
+ kill_wait $evts_ns2_pid
+ rm -rf $evts_ns1 $evts_ns2
+ fi
+
NSTAT_HISTORY=/tmp/${listener_ns}.nstat ip netns exec ${listener_ns} \
nstat | grep Tcp > /tmp/${listener_ns}.out
NSTAT_HISTORY=/tmp/${connector_ns}.nstat ip netns exec ${connector_ns} \
@@ -961,6 +1077,7 @@ chk_csum_nr()
local csum_ns2=${2:-0}
local count
local dump_stats
+ local extra_msg=""
local allow_multi_errors_ns1=0
local allow_multi_errors_ns2=0
@@ -976,6 +1093,9 @@ chk_csum_nr()
printf "%-${nr_blank}s %s" " " "sum"
count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
[ -z "$count" ] && count=0
+ if [ "$count" != "$csum_ns1" ]; then
+ extra_msg="$extra_msg ns1=$count"
+ fi
if { [ "$count" != $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 0 ]; } ||
{ [ "$count" -lt $csum_ns1 ] && [ $allow_multi_errors_ns1 -eq 1 ]; }; then
echo "[fail] got $count data checksum error[s] expected $csum_ns1"
@@ -987,28 +1107,58 @@ chk_csum_nr()
echo -n " - csum "
count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtDataCsumErr | awk '{print $2}')
[ -z "$count" ] && count=0
+ if [ "$count" != "$csum_ns2" ]; then
+ extra_msg="$extra_msg ns2=$count"
+ fi
if { [ "$count" != $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 0 ]; } ||
{ [ "$count" -lt $csum_ns2 ] && [ $allow_multi_errors_ns2 -eq 1 ]; }; then
echo "[fail] got $count data checksum error[s] expected $csum_ns2"
fail_test
dump_stats=1
else
- echo "[ ok ]"
+ echo -n "[ ok ]"
fi
[ "${dump_stats}" = 1 ] && dump_stats
+
+ echo "$extra_msg"
}
chk_fail_nr()
{
local fail_tx=$1
local fail_rx=$2
+ local ns_invert=${3:-""}
local count
local dump_stats
+ local ns_tx=$ns1
+ local ns_rx=$ns2
+ local extra_msg=""
+ local allow_tx_lost=0
+ local allow_rx_lost=0
+
+ if [[ $ns_invert = "invert" ]]; then
+ ns_tx=$ns2
+ ns_rx=$ns1
+ extra_msg=" invert"
+ fi
+
+ if [[ "${fail_tx}" = "-"* ]]; then
+ allow_tx_lost=1
+ fail_tx=${fail_tx:1}
+ fi
+ if [[ "${fail_rx}" = "-"* ]]; then
+ allow_rx_lost=1
+ fail_rx=${fail_rx:1}
+ fi
printf "%-${nr_blank}s %s" " " "ftx"
- count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtMPFailTx | awk '{print $2}')
+ count=$(ip netns exec $ns_tx nstat -as | grep MPTcpExtMPFailTx | awk '{print $2}')
[ -z "$count" ] && count=0
if [ "$count" != "$fail_tx" ]; then
+ extra_msg="$extra_msg,tx=$count"
+ fi
+ if { [ "$count" != "$fail_tx" ] && [ $allow_tx_lost -eq 0 ]; } ||
+ { [ "$count" -gt "$fail_tx" ] && [ $allow_tx_lost -eq 1 ]; }; then
echo "[fail] got $count MP_FAIL[s] TX expected $fail_tx"
fail_test
dump_stats=1
@@ -1017,17 +1167,23 @@ chk_fail_nr()
fi
echo -n " - failrx"
- count=$(ip netns exec $ns2 nstat -as | grep MPTcpExtMPFailRx | awk '{print $2}')
+ count=$(ip netns exec $ns_rx nstat -as | grep MPTcpExtMPFailRx | awk '{print $2}')
[ -z "$count" ] && count=0
if [ "$count" != "$fail_rx" ]; then
+ extra_msg="$extra_msg,rx=$count"
+ fi
+ if { [ "$count" != "$fail_rx" ] && [ $allow_rx_lost -eq 0 ]; } ||
+ { [ "$count" -gt "$fail_rx" ] && [ $allow_rx_lost -eq 1 ]; }; then
echo "[fail] got $count MP_FAIL[s] RX expected $fail_rx"
fail_test
dump_stats=1
else
- echo "[ ok ]"
+ echo -n "[ ok ]"
fi
[ "${dump_stats}" = 1 ] && dump_stats
+
+ echo "$extra_msg"
}
chk_fclose_nr()
@@ -1106,6 +1262,38 @@ chk_rst_nr()
echo "$extra_msg"
}
+chk_infi_nr()
+{
+ local infi_tx=$1
+ local infi_rx=$2
+ local count
+ local dump_stats
+
+ printf "%-${nr_blank}s %s" " " "itx"
+ count=$(ip netns exec $ns2 nstat -as | grep InfiniteMapTx | awk '{print $2}')
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$infi_tx" ]; then
+ echo "[fail] got $count infinite map[s] TX expected $infi_tx"
+ fail_test
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - infirx"
+ count=$(ip netns exec $ns1 nstat -as | grep InfiniteMapRx | awk '{print $2}')
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$infi_rx" ]; then
+ echo "[fail] got $count infinite map[s] RX expected $infi_rx"
+ fail_test
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+
+ [ "${dump_stats}" = 1 ] && dump_stats
+}
+
chk_join_nr()
{
local syn_nr=$1
@@ -1115,7 +1303,8 @@ chk_join_nr()
local csum_ns2=${5:-0}
local fail_nr=${6:-0}
local rst_nr=${7:-0}
- local corrupted_pkts=${8:-0}
+ local infi_nr=${8:-0}
+ local corrupted_pkts=${9:-0}
local count
local dump_stats
local with_cookie
@@ -1166,10 +1355,11 @@ chk_join_nr()
echo "[ ok ]"
fi
[ "${dump_stats}" = 1 ] && dump_stats
- if [ $checksum -eq 1 ]; then
+ if [ $validate_checksum -eq 1 ]; then
chk_csum_nr $csum_ns1 $csum_ns2
chk_fail_nr $fail_nr $fail_nr
chk_rst_nr $rst_nr $rst_nr
+ chk_infi_nr $infi_nr $infi_nr
fi
}
@@ -1512,6 +1702,13 @@ wait_attempt_fail()
return 1
}
+set_userspace_pm()
+{
+ local ns=$1
+
+ ip netns exec $ns sysctl -q net.mptcp.pm_type=1
+}
+
subflows_tests()
{
if reset "no JOIN"; then
@@ -2231,6 +2428,36 @@ backup_tests()
chk_add_nr 1 1
chk_prio_nr 1 1
fi
+
+ if reset "mpc backup"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ chk_join_nr 0 0 0
+ chk_prio_nr 0 1
+ fi
+
+ if reset "mpc backup both sides"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ chk_join_nr 0 0 0
+ chk_prio_nr 1 1
+ fi
+
+ if reset "mpc switch to backup"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ chk_join_nr 0 0 0
+ chk_prio_nr 0 1
+ fi
+
+ if reset "mpc switch to backup both sides"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ chk_join_nr 0 0 0
+ chk_prio_nr 1 1
+ fi
}
add_addr_ports_tests()
@@ -2583,6 +2810,120 @@ fastclose_tests()
fi
}
+pedit_action_pkts()
+{
+ tc -n $ns2 -j -s action show action pedit index 100 | \
+ grep "packets" | \
+ sed 's/.*"packets":\([0-9]\+\),.*/\1/'
+}
+
+fail_tests()
+{
+ # single subflow
+ if reset_with_fail "Infinite map" 1; then
+ run_tests $ns1 $ns2 10.0.1.1 128
+ chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
+ chk_fail_nr 1 -1 invert
+ fi
+
+ # multiple subflows
+ if reset_with_fail "MP_FAIL MP_RST" 2; then
+ tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 0 1
+ pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 1024
+ chk_join_nr 1 1 1 1 0 1 1 0 "$(pedit_action_pkts)"
+ fi
+}
+
+userspace_tests()
+{
+ # userspace pm type prevents add_addr
+ if reset "userspace pm type prevents add_addr"; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 0 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+ chk_add_nr 0 0
+ fi
+
+ # userspace pm type does not echo add_addr without daemon
+ if reset "userspace pm no echo w/o daemon"; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 0 2
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+ chk_add_nr 1 0
+ fi
+
+ # userspace pm type rejects join
+ if reset "userspace pm type rejects join"; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 0
+ fi
+
+ # userspace pm type does not send join
+ if reset "userspace pm type does not send join"; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 0 0 0
+ fi
+
+ # userspace pm type prevents mp_prio
+ if reset "userspace pm type prevents mp_prio"; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns1 1 1
+ pm_nl_set_limits $ns2 1 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ chk_join_nr 1 1 0
+ chk_prio_nr 0 0
+ fi
+
+ # userspace pm type prevents rm_addr
+ if reset "userspace pm type prevents rm_addr"; then
+ set_userspace_pm $ns1
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 1
+ pm_nl_set_limits $ns2 0 1
+ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 -1 slow
+ chk_join_nr 0 0 0
+ chk_rm_nr 0 0
+ fi
+
+ # userspace pm add & remove address
+ if reset "userspace pm add & remove address"; then
+ set_userspace_pm $ns1
+ pm_nl_set_limits $ns2 1 1
+ run_tests $ns1 $ns2 10.0.1.1 0 userspace_1 0 slow
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+ chk_rm_nr 1 1 invert
+ fi
+
+ # userspace pm create destroy subflow
+ if reset "userspace pm create destroy subflow"; then
+ set_userspace_pm $ns2
+ pm_nl_set_limits $ns1 0 1
+ run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
+ chk_join_nr 1 1 1
+ chk_rm_nr 0 1
+ fi
+}
+
endpoint_tests()
{
# userspace pm type prevents add_addr
@@ -2668,6 +3009,8 @@ all_tests_sorted=(
d@deny_join_id0_tests
m@fullmesh_tests
z@fastclose_tests
+ F@fail_tests
+ u@userspace_tests
I@endpoint_tests
)
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
index ac9a4d9c1764..ae61f39556ca 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
@@ -136,7 +136,7 @@ static void xgetaddrinfo(const char *node, const char *service,
static int sock_listen_mptcp(const char * const listenaddr,
const char * const port)
{
- int sock;
+ int sock = -1;
struct addrinfo hints = {
.ai_protocol = IPPROTO_TCP,
.ai_socktype = SOCK_STREAM,
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index a75a68ad652e..abddf4c63e79 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -6,6 +6,7 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <limits.h>
#include <sys/socket.h>
#include <sys/types.h>
@@ -21,17 +22,29 @@
#ifndef MPTCP_PM_NAME
#define MPTCP_PM_NAME "mptcp_pm"
#endif
+#ifndef MPTCP_PM_EVENTS
+#define MPTCP_PM_EVENTS "mptcp_pm_events"
+#endif
+#ifndef IPPROTO_MPTCP
+#define IPPROTO_MPTCP 262
+#endif
static void syntax(char *argv[])
{
- fprintf(stderr, "%s add|get|set|del|flush|dump|accept [<args>]\n", argv[0]);
+ fprintf(stderr, "%s add|ann|rem|csf|dsf|get|set|del|flush|dump|events|listen|accept [<args>]\n", argv[0]);
fprintf(stderr, "\tadd [flags signal|subflow|backup|fullmesh] [id <nr>] [dev <name>] <ip>\n");
+ fprintf(stderr, "\tann <local-ip> id <local-id> token <token> [port <local-port>] [dev <name>]\n");
+ fprintf(stderr, "\trem id <local-id> token <token>\n");
+ fprintf(stderr, "\tcsf lip <local-ip> lid <local-id> rip <remote-ip> rport <remote-port> token <token>\n");
+ fprintf(stderr, "\tdsf lip <local-ip> lport <local-port> rip <remote-ip> rport <remote-port> token <token>\n");
fprintf(stderr, "\tdel <id> [<ip>]\n");
fprintf(stderr, "\tget <id>\n");
- fprintf(stderr, "\tset [<ip>] [id <nr>] flags [no]backup|[no]fullmesh [port <nr>]\n");
+ fprintf(stderr, "\tset [<ip>] [id <nr>] flags [no]backup|[no]fullmesh [port <nr>] [token <token>] [rip <ip>] [rport <port>]\n");
fprintf(stderr, "\tflush\n");
fprintf(stderr, "\tdump\n");
fprintf(stderr, "\tlimits [<rcv addr max> <subflow max>]\n");
+ fprintf(stderr, "\tevents\n");
+ fprintf(stderr, "\tlisten <local-ip> <local-port>\n");
exit(0);
}
@@ -83,6 +96,108 @@ static void nl_error(struct nlmsghdr *nh)
}
}
+static int capture_events(int fd, int event_group)
+{
+ u_int8_t buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) + 1024];
+ struct genlmsghdr *ghdr;
+ struct rtattr *attrs;
+ struct nlmsghdr *nh;
+ int ret = 0;
+ int res_len;
+ int msg_len;
+ fd_set rfds;
+
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+ &event_group, sizeof(event_group)) < 0)
+ error(1, errno, "could not join the " MPTCP_PM_EVENTS " mcast group");
+
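+	/* Loop forever: wait for netlink messages and print each MPTCP PM
+	 * event as one "type:<cmd>,attr:value,..." line on stderr, which the
+	 * shell tests parse with sed/grep. */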
+ do {
+ FD_ZERO(&rfds);
+ FD_SET(fd, &rfds);
+ res_len = NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) + 1024;
+
+ ret = select(FD_SETSIZE, &rfds, NULL, NULL, NULL);
+
+ if (ret < 0)
+ error(1, ret, "error in select() on NL socket");
+
+ res_len = recv(fd, buffer, res_len, 0);
+ if (res_len < 0)
+ error(1, res_len, "error on recv() from NL socket");
+
+ nh = (struct nlmsghdr *)buffer;
+
+ for (; NLMSG_OK(nh, res_len); nh = NLMSG_NEXT(nh, res_len)) {
+ if (nh->nlmsg_type == NLMSG_ERROR)
+ error(1, NLMSG_ERROR, "received invalid NL message");
+
+ ghdr = (struct genlmsghdr *)NLMSG_DATA(nh);
+
+ if (ghdr->cmd == 0)
+ continue;
+
+ fprintf(stderr, "type:%d", ghdr->cmd);
+
+ msg_len = nh->nlmsg_len - NLMSG_LENGTH(GENL_HDRLEN);
+
+ attrs = (struct rtattr *) ((char *) ghdr + GENL_HDRLEN);
+ while (RTA_OK(attrs, msg_len)) {
+ if (attrs->rta_type == MPTCP_ATTR_TOKEN)
+ fprintf(stderr, ",token:%u", *(__u32 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_FAMILY)
+ fprintf(stderr, ",family:%u", *(__u16 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_LOC_ID)
+ fprintf(stderr, ",loc_id:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_REM_ID)
+ fprintf(stderr, ",rem_id:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_SADDR4) {
+ u_int32_t saddr4 = ntohl(*(__u32 *)RTA_DATA(attrs));
+
+ fprintf(stderr, ",saddr4:%u.%u.%u.%u", saddr4 >> 24,
+ (saddr4 >> 16) & 0xFF, (saddr4 >> 8) & 0xFF,
+ (saddr4 & 0xFF));
+ } else if (attrs->rta_type == MPTCP_ATTR_SADDR6) {
+ char buf[INET6_ADDRSTRLEN];
+
+ if (inet_ntop(AF_INET6, RTA_DATA(attrs), buf,
+ sizeof(buf)) != NULL)
+ fprintf(stderr, ",saddr6:%s", buf);
+ } else if (attrs->rta_type == MPTCP_ATTR_DADDR4) {
+ u_int32_t daddr4 = ntohl(*(__u32 *)RTA_DATA(attrs));
+
+ fprintf(stderr, ",daddr4:%u.%u.%u.%u", daddr4 >> 24,
+ (daddr4 >> 16) & 0xFF, (daddr4 >> 8) & 0xFF,
+ (daddr4 & 0xFF));
+ } else if (attrs->rta_type == MPTCP_ATTR_DADDR6) {
+ char buf[INET6_ADDRSTRLEN];
+
+ if (inet_ntop(AF_INET6, RTA_DATA(attrs), buf,
+ sizeof(buf)) != NULL)
+ fprintf(stderr, ",daddr6:%s", buf);
+ } else if (attrs->rta_type == MPTCP_ATTR_SPORT)
+ fprintf(stderr, ",sport:%u",
+ ntohs(*(__u16 *)RTA_DATA(attrs)));
+ else if (attrs->rta_type == MPTCP_ATTR_DPORT)
+ fprintf(stderr, ",dport:%u",
+ ntohs(*(__u16 *)RTA_DATA(attrs)));
+ else if (attrs->rta_type == MPTCP_ATTR_BACKUP)
+ fprintf(stderr, ",backup:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_ERROR)
+ fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
+ fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
+
+ attrs = RTA_NEXT(attrs, msg_len);
+ }
+ }
+ fprintf(stderr, "\n");
+ } while (1);
+
+ return 0;
+}
+
/* do a netlink command and, if max > 0, fetch the reply */
static int do_nl_req(int fd, struct nlmsghdr *nh, int len, int max)
{
@@ -116,11 +231,18 @@ static int do_nl_req(int fd, struct nlmsghdr *nh, int len, int max)
return ret;
}
-static int genl_parse_getfamily(struct nlmsghdr *nlh)
+static int genl_parse_getfamily(struct nlmsghdr *nlh, int *pm_family,
+ int *events_mcast_grp)
{
struct genlmsghdr *ghdr = NLMSG_DATA(nlh);
int len = nlh->nlmsg_len;
struct rtattr *attrs;
+ struct rtattr *grps;
+ struct rtattr *grp;
+ int got_events_grp;
+ int got_family;
+ int grps_len;
+ int grp_len;
if (nlh->nlmsg_type != GENL_ID_CTRL)
error(1, errno, "Not a controller message, len=%d type=0x%x\n",
@@ -135,9 +257,42 @@ static int genl_parse_getfamily(struct nlmsghdr *nlh)
error(1, errno, "Unknown controller command %d\n", ghdr->cmd);
attrs = (struct rtattr *) ((char *) ghdr + GENL_HDRLEN);
+ got_family = 0;
+ got_events_grp = 0;
+
while (RTA_OK(attrs, len)) {
- if (attrs->rta_type == CTRL_ATTR_FAMILY_ID)
- return *(__u16 *)RTA_DATA(attrs);
+ if (attrs->rta_type == CTRL_ATTR_FAMILY_ID) {
+ *pm_family = *(__u16 *)RTA_DATA(attrs);
+ got_family = 1;
+ } else if (attrs->rta_type == CTRL_ATTR_MCAST_GROUPS) {
+ grps = RTA_DATA(attrs);
+ grps_len = RTA_PAYLOAD(attrs);
+
+ while (RTA_OK(grps, grps_len)) {
+ grp = RTA_DATA(grps);
+ grp_len = RTA_PAYLOAD(grps);
+ got_events_grp = 0;
+
+ while (RTA_OK(grp, grp_len)) {
+ if (grp->rta_type == CTRL_ATTR_MCAST_GRP_ID)
+ *events_mcast_grp = *(__u32 *)RTA_DATA(grp);
+ else if (grp->rta_type == CTRL_ATTR_MCAST_GRP_NAME &&
+ !strcmp(RTA_DATA(grp), MPTCP_PM_EVENTS))
+ got_events_grp = 1;
+
+ grp = RTA_NEXT(grp, grp_len);
+ }
+
+ if (got_events_grp)
+ break;
+
+ grps = RTA_NEXT(grps, grps_len);
+ }
+ }
+
+ if (got_family && got_events_grp)
+ return 0;
+
attrs = RTA_NEXT(attrs, len);
}
@@ -145,7 +300,7 @@ static int genl_parse_getfamily(struct nlmsghdr *nlh)
return -1;
}
-static int resolve_mptcp_pm_netlink(int fd)
+static int resolve_mptcp_pm_netlink(int fd, int *pm_family, int *events_mcast_grp)
{
char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
@@ -167,7 +322,421 @@ static int resolve_mptcp_pm_netlink(int fd)
off += NLMSG_ALIGN(rta->rta_len);
do_nl_req(fd, nh, off, sizeof(data));
- return genl_parse_getfamily((void *)data);
+ return genl_parse_getfamily((void *)data, pm_family, events_mcast_grp);
+}
+
+int dsf(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct rtattr *rta, *addr;
+ u_int16_t family, port;
+ struct nlmsghdr *nh;
+ u_int32_t token;
+ int addr_start;
+ int off = 0;
+ int arg;
+
+ const char *params[5];
+
+ memset(params, 0, 5 * sizeof(const char *));
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_SUBFLOW_DESTROY,
+ MPTCP_PM_VER);
+
+ if (argc < 12)
+ syntax(argv);
+
+ /* Params recorded in this order:
+ * <local-ip>, <local-port>, <remote-ip>, <remote-port>, <token>
+ */
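+ /* A typical invocation, with illustrative values:
+ * pm_nl_ctl dsf lip 10.0.2.1 lport 60123 rip 10.0.2.2 rport 50002 token 823274047
+ */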
+ for (arg = 2; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "lip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local IP");
+
+ params[0] = argv[arg];
+ } else if (!strcmp(argv[arg], "lport")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local port");
+
+ params[1] = argv[arg];
+ } else if (!strcmp(argv[arg], "rip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote IP");
+
+ params[2] = argv[arg];
+ } else if (!strcmp(argv[arg], "rport")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote port");
+
+ params[3] = argv[arg];
+ } else if (!strcmp(argv[arg], "token")) {
+ if (++arg >= argc)
+ error(1, 0, " missing token");
+
+ params[4] = argv[arg];
+ } else
+ error(1, 0, "unknown keyword %s", argv[arg]);
+ }
+
+ for (arg = 0; arg < 4; arg = arg + 2) {
+ /* addr header */
+ addr_start = off;
+ addr = (void *)(data + off);
+ addr->rta_type = NLA_F_NESTED |
+ ((arg == 0) ? MPTCP_PM_ATTR_ADDR : MPTCP_PM_ATTR_ADDR_REMOTE);
+ addr->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(addr->rta_len);
+
+ /* addr data */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, params[arg], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, params[arg], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else
+ error(1, errno, "can't parse ip %s", params[arg]);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* port */
+ port = atoi(params[arg + 1]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &port, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ addr->rta_len = off - addr_start;
+ }
+
+ /* token */
+ token = atoi(params[4]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ do_nl_req(fd, nh, off, 0);
+
+ return 0;
+}
+
+int csf(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ const char *params[5];
+ struct nlmsghdr *nh;
+ struct rtattr *addr;
+ struct rtattr *rta;
+ u_int16_t family;
+ u_int32_t token;
+ u_int16_t port;
+ int addr_start;
+ u_int8_t id;
+ int off = 0;
+ int arg;
+
+ memset(params, 0, 5 * sizeof(const char *));
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_SUBFLOW_CREATE,
+ MPTCP_PM_VER);
+
+ if (argc < 12)
+ syntax(argv);
+
+ /* Params recorded in this order:
+ * <local-ip>, <local-id>, <remote-ip>, <remote-port>, <token>
+ */
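+ /* A typical invocation, with illustrative values:
+ * pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2 rport 50002 token 823274047
+ */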
+ for (arg = 2; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "lip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local IP");
+
+ params[0] = argv[arg];
+ } else if (!strcmp(argv[arg], "lid")) {
+ if (++arg >= argc)
+ error(1, 0, " missing local id");
+
+ params[1] = argv[arg];
+ } else if (!strcmp(argv[arg], "rip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote ip");
+
+ params[2] = argv[arg];
+ } else if (!strcmp(argv[arg], "rport")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote port");
+
+ params[3] = argv[arg];
+ } else if (!strcmp(argv[arg], "token")) {
+ if (++arg >= argc)
+ error(1, 0, " missing token");
+
+ params[4] = argv[arg];
+ } else
+ error(1, 0, "unknown param %s", argv[arg]);
+ }
+
+ for (arg = 0; arg < 4; arg = arg + 2) {
+ /* addr header */
+ addr_start = off;
+ addr = (void *)(data + off);
+ addr->rta_type = NLA_F_NESTED |
+ ((arg == 0) ? MPTCP_PM_ATTR_ADDR : MPTCP_PM_ATTR_ADDR_REMOTE);
+ addr->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(addr->rta_len);
+
+ /* addr data */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, params[arg], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, params[arg], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else
+ error(1, errno, "can't parse ip %s", params[arg]);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ if (arg == 2) {
+ /* port */
+ port = atoi(params[arg + 1]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &port, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
+ if (arg == 0) {
+ /* id */
+ id = atoi(params[arg + 1]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
+ addr->rta_len = off - addr_start;
+ }
+
+ /* token */
+ token = atoi(params[4]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ do_nl_req(fd, nh, off, 0);
+
+ return 0;
+}
+
+int remove_addr(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ struct nlmsghdr *nh;
+ struct rtattr *rta;
+ u_int32_t token;
+ u_int8_t id;
+ int off = 0;
+ int arg;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_REMOVE,
+ MPTCP_PM_VER);
+
+ if (argc < 6)
+ syntax(argv);
+
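+ /* Expected usage, with illustrative values:
+ * pm_nl_ctl rem token 823274047 id 25
+ */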
+ for (arg = 2; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "id")) {
+ if (++arg >= argc)
+ error(1, 0, " missing id value");
+
+ id = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_LOC_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "token")) {
+ if (++arg >= argc)
+ error(1, 0, " missing token value");
+
+ token = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else
+ error(1, 0, "unknown keyword %s", argv[arg]);
+ }
+
+ do_nl_req(fd, nh, off, 0);
+ return 0;
+}
+
+int announce_addr(int fd, int pm_family, int argc, char *argv[])
+{
+ char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
+ NLMSG_ALIGN(sizeof(struct genlmsghdr)) +
+ 1024];
+ u_int32_t flags = MPTCP_PM_ADDR_FLAG_SIGNAL;
+ u_int32_t token = UINT_MAX;
+ struct rtattr *rta, *addr;
+ u_int32_t id = UINT_MAX;
+ struct nlmsghdr *nh;
+ u_int16_t family;
+ int addr_start;
+ int off = 0;
+ int arg;
+
+ memset(data, 0, sizeof(data));
+ nh = (void *)data;
+ off = init_genl_req(data, pm_family, MPTCP_PM_CMD_ANNOUNCE,
+ MPTCP_PM_VER);
+
+ if (argc < 7)
+ syntax(argv);
+
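+ /* Expected usage, with illustrative values; 'dev' and 'port' are
+ * optional while 'id' and 'token' are mandatory:
+ * pm_nl_ctl ann 10.0.2.2 token 823274047 id 25 dev ns2eth1 port 50003
+ */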
+ /* local-ip header */
+ addr_start = off;
+ addr = (void *)(data + off);
+ addr->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR;
+ addr->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(addr->rta_len);
+
+ /* local-ip data */
+ /* record addr type */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, argv[2], RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, argv[2], RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else
+ error(1, errno, "can't parse ip %s", argv[2]);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* addr family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ for (arg = 3; arg < argc; arg++) {
+ if (!strcmp(argv[arg], "id")) {
+ /* local-id */
+ if (++arg >= argc)
+ error(1, 0, " missing id value");
+
+ id = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ID;
+ rta->rta_len = RTA_LENGTH(1);
+ memcpy(RTA_DATA(rta), &id, 1);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "dev")) {
+ /* for the if_index */
+ int32_t ifindex;
+
+ if (++arg >= argc)
+ error(1, 0, " missing dev name");
+
+ ifindex = if_nametoindex(argv[arg]);
+ if (!ifindex)
+ error(1, errno, "unknown device %s", argv[arg]);
+
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_IF_IDX;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &ifindex, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "port")) {
+ /* local-port (optional) */
+ u_int16_t port;
+
+ if (++arg >= argc)
+ error(1, 0, " missing port value");
+
+ port = atoi(argv[arg]);
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &port, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "token")) {
+ /* MPTCP connection token */
+ if (++arg >= argc)
+ error(1, 0, " missing token value");
+
+ token = atoi(argv[arg]);
+ } else
+ error(1, 0, "unknown keyword %s", argv[arg]);
+ }
+
+ /* addr flags */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FLAGS;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &flags, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ addr->rta_len = off - addr_start;
+
+ if (id == UINT_MAX || token == UINT_MAX)
+ error(1, 0, " missing mandatory inputs");
+
+ /* token */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ do_nl_req(fd, nh, off, 0);
+
+ return 0;
}
int add_addr(int fd, int pm_family, int argc, char *argv[])
@@ -654,6 +1223,54 @@ int get_set_limits(int fd, int pm_family, int argc, char *argv[])
return 0;
}
+int add_listener(int argc, char *argv[])
+{
+ struct sockaddr_storage addr;
+ struct sockaddr_in6 *a6;
+ struct sockaddr_in *a4;
+ u_int16_t family;
+ int enable = 1;
+ int sock;
+ int err;
+
+ if (argc < 4)
+ syntax(argv);
+
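+ /* Expected usage: pm_nl_ctl listen <addr> <port>
+ * Creates an MPTCP listener so that subflows towards an announced
+ * address and port can be accepted.
+ */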
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+ a4 = (struct sockaddr_in *)&addr;
+ a6 = (struct sockaddr_in6 *)&addr;
+
+ if (inet_pton(AF_INET, argv[2], &a4->sin_addr)) {
+ family = AF_INET;
+ a4->sin_family = family;
+ a4->sin_port = htons(atoi(argv[3]));
+ } else if (inet_pton(AF_INET6, argv[2], &a6->sin6_addr)) {
+ family = AF_INET6;
+ a6->sin6_family = family;
+ a6->sin6_port = htons(atoi(argv[3]));
+ } else
+ error(1, errno, "can't parse ip %s", argv[2]);
+
+ sock = socket(family, SOCK_STREAM, IPPROTO_MPTCP);
+ if (sock < 0)
+ error(1, errno, "can't create listener sock\n");
+
+ if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable))) {
+ close(sock);
+ error(1, errno, "can't set SO_REUSEADDR on listener sock\n");
+ }
+
+ err = bind(sock, (struct sockaddr *)&addr,
+ ((family == AF_INET) ? sizeof(struct sockaddr_in) :
+ sizeof(struct sockaddr_in6)));
+
+ if (err == 0 && listen(sock, 30) == 0)
+ pause();
+
+ close(sock);
+ return 0;
+}
+
int set_flags(int fd, int pm_family, int argc, char *argv[])
{
char data[NLMSG_ALIGN(sizeof(struct nlmsghdr)) +
@@ -662,7 +1279,10 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
struct rtattr *rta, *nest;
struct nlmsghdr *nh;
u_int32_t flags = 0;
+ u_int32_t token = 0;
+ u_int16_t rport = 0;
u_int16_t family;
+ void *rip = NULL;
int nest_start;
int use_id = 0;
u_int8_t id;
@@ -722,7 +1342,13 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
error(1, 0, " missing flags keyword");
for (; arg < argc; arg++) {
- if (!strcmp(argv[arg], "flags")) {
+ if (!strcmp(argv[arg], "token")) {
+ if (++arg >= argc)
+ error(1, 0, " missing token value");
+
+ /* token */
+ token = atoi(argv[arg]);
+ } else if (!strcmp(argv[arg], "flags")) {
char *tok, *str;
/* flags */
@@ -761,19 +1387,81 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
rta->rta_len = RTA_LENGTH(2);
memcpy(RTA_DATA(rta), &port, 2);
off += NLMSG_ALIGN(rta->rta_len);
+ } else if (!strcmp(argv[arg], "rport")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote port");
+
+ rport = atoi(argv[arg]);
+ } else if (!strcmp(argv[arg], "rip")) {
+ if (++arg >= argc)
+ error(1, 0, " missing remote ip");
+
+ rip = argv[arg];
} else {
error(1, 0, "unknown keyword %s", argv[arg]);
}
}
nest->rta_len = off - nest_start;
+ /* token */
+ if (token) {
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ATTR_TOKEN;
+ rta->rta_len = RTA_LENGTH(4);
+ memcpy(RTA_DATA(rta), &token, 4);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
+ /* remote addr/port */
+ if (rip) {
+ nest_start = off;
+ nest = (void *)(data + off);
+ nest->rta_type = NLA_F_NESTED | MPTCP_PM_ATTR_ADDR_REMOTE;
+ nest->rta_len = RTA_LENGTH(0);
+ off += NLMSG_ALIGN(nest->rta_len);
+
+ /* addr data */
+ rta = (void *)(data + off);
+ if (inet_pton(AF_INET, rip, RTA_DATA(rta))) {
+ family = AF_INET;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR4;
+ rta->rta_len = RTA_LENGTH(4);
+ } else if (inet_pton(AF_INET6, rip, RTA_DATA(rta))) {
+ family = AF_INET6;
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_ADDR6;
+ rta->rta_len = RTA_LENGTH(16);
+ } else {
+ error(1, errno, "can't parse ip %s", (char *)rip);
+ }
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ /* family */
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_FAMILY;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &family, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+
+ if (rport) {
+ rta = (void *)(data + off);
+ rta->rta_type = MPTCP_PM_ADDR_ATTR_PORT;
+ rta->rta_len = RTA_LENGTH(2);
+ memcpy(RTA_DATA(rta), &rport, 2);
+ off += NLMSG_ALIGN(rta->rta_len);
+ }
+
+ nest->rta_len = off - nest_start;
+ }
+
do_nl_req(fd, nh, off, 0);
return 0;
}
int main(int argc, char *argv[])
{
- int fd, pm_family;
+ int events_mcast_grp;
+ int pm_family;
+ int fd;
if (argc < 2)
syntax(argv);
@@ -782,10 +1470,18 @@ int main(int argc, char *argv[])
if (fd == -1)
error(1, errno, "socket netlink");
- pm_family = resolve_mptcp_pm_netlink(fd);
+ resolve_mptcp_pm_netlink(fd, &pm_family, &events_mcast_grp);
if (!strcmp(argv[1], "add"))
return add_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "ann"))
+ return announce_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "rem"))
+ return remove_addr(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "csf"))
+ return csf(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "dsf"))
+ return dsf(fd, pm_family, argc, argv);
else if (!strcmp(argv[1], "del"))
return del_addr(fd, pm_family, argc, argv);
else if (!strcmp(argv[1], "flush"))
@@ -798,6 +1494,10 @@ int main(int argc, char *argv[])
return get_set_limits(fd, pm_family, argc, argv);
else if (!strcmp(argv[1], "set"))
return set_flags(fd, pm_family, argc, argv);
+ else if (!strcmp(argv[1], "events"))
+ return capture_events(fd, events_mcast_grp);
+ else if (!strcmp(argv[1], "listen"))
+ return add_listener(argc, argv);
fprintf(stderr, "unknown sub-command: %s", argv[1]);
syntax(argv);
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index f441ff7904fc..ffa13a957a36 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -12,6 +12,7 @@ timeout_test=$((timeout_poll * 2 + 1))
test_cnt=1
ret=0
bail=0
+slack=50
usage() {
echo "Usage: $0 [ -b ] [ -c ] [ -d ]"
@@ -52,6 +53,7 @@ setup()
cout=$(mktemp)
capout=$(mktemp)
size=$((2 * 2048 * 4096))
+
dd if=/dev/zero of=$small bs=4096 count=20 >/dev/null 2>&1
dd if=/dev/zero of=$large bs=4096 count=$((size / 4096)) >/dev/null 2>&1
@@ -104,6 +106,16 @@ setup()
ip -net "$ns3" route add default via dead:beef:3::2
ip netns exec "$ns3" ./pm_nl_ctl limits 1 1
+
+ # a debug build can measurably slow down the test program; we use
+ # a quite tight time limit on the run-time to ensure
+ # maximum B/W usage.
+ # Use kmemleak/lockdep/kasan/prove_locking presence as a rough
+ # estimate for this being a debug kernel and increase the
+ # maximum run-time accordingly. Observed run times for CI builds
+ # running selftests, including kbuild, were used to determine the
+ # amount of time to add.
+ grep -q ' kmemleak_init$\| lockdep_init$\| kasan_init$\| prove_locking$' /proc/kallsyms && slack=$((slack+550))
}
# $1: ns, $2: port
@@ -241,7 +253,7 @@ run_test()
# mptcp_connect will do some sleeps to allow the mp_join handshake
# completion (see mptcp_connect): 200ms on each side, add some slack
- time=$((time + 450))
+ time=$((time + 400 + slack))
printf "%-60s" "$msg"
do_transfer $small $large $time
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
new file mode 100755
index 000000000000..3229725b64b0
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -0,0 +1,817 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Cannot not run test without ip tool"
+ exit 1
+fi
+
+ANNOUNCED=6 # MPTCP_EVENT_ANNOUNCED
+REMOVED=7 # MPTCP_EVENT_REMOVED
+SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
+SUB_CLOSED=11 # MPTCP_EVENT_SUB_CLOSED
+
+AF_INET=2
+AF_INET6=10
+
+evts_pid=0
+client4_pid=0
+server4_pid=0
+client6_pid=0
+server6_pid=0
+client4_token=""
+server4_token=""
+client6_token=""
+server6_token=""
+client4_port=0;
+client6_port=0;
+app4_port=50002
+new4_port=50003
+app6_port=50004
+client_addr_id=${RANDOM:0:2}
+server_addr_id=${RANDOM:0:2}
+
+sec=$(date +%s)
+rndh=$(stdbuf -o0 -e0 printf %x "$sec")-$(mktemp -u XXXXXX)
+ns1="ns1-$rndh"
+ns2="ns2-$rndh"
+
+kill_wait()
+{
+ kill $1 > /dev/null 2>&1
+ wait $1 2>/dev/null
+}
+
+cleanup()
+{
+ echo "cleanup"
+
+ rm -rf $file
+
+ # Terminate the MPTCP connection and related processes
+ if [ $client4_pid -ne 0 ]; then
+ kill -SIGUSR1 $client4_pid > /dev/null 2>&1
+ fi
+ if [ $server4_pid -ne 0 ]; then
+ kill_wait $server4_pid
+ fi
+ if [ $client6_pid -ne 0 ]; then
+ kill -SIGUSR1 $client6_pid > /dev/null 2>&1
+ fi
+ if [ $server6_pid -ne 0 ]; then
+ kill_wait $server6_pid
+ fi
+ if [ $evts_pid -ne 0 ]; then
+ kill_wait $evts_pid
+ fi
+ local netns
+ for netns in "$ns1" "$ns2" ;do
+ ip netns del "$netns"
+ done
+}
+
+trap cleanup EXIT
+
+# Create and configure network namespaces for testing
+for i in "$ns1" "$ns2" ;do
+ ip netns add "$i" || exit 1
+ ip -net "$i" link set lo up
+ ip netns exec "$i" sysctl -q net.mptcp.enabled=1
+ ip netns exec "$i" sysctl -q net.mptcp.pm_type=1
+done
+
+# "$ns1" ns2
+# ns1eth2 ns2eth1
+
+ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
+
+# Add IPv4/v6 addresses to the namespaces
+ip -net "$ns1" addr add 10.0.1.1/24 dev ns1eth2
+ip -net "$ns1" addr add 10.0.2.1/24 dev ns1eth2
+ip -net "$ns1" addr add dead:beef:1::1/64 dev ns1eth2 nodad
+ip -net "$ns1" addr add dead:beef:2::1/64 dev ns1eth2 nodad
+ip -net "$ns1" link set ns1eth2 up
+
+ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
+ip -net "$ns2" addr add 10.0.2.2/24 dev ns2eth1
+ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
+ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth1 nodad
+ip -net "$ns2" link set ns2eth1 up
+
+stdbuf -o0 -e0 printf "Created network namespaces ns1, ns2 \t\t\t[OK]\n"
+
+make_file()
+{
+ # Store a chunk of data in a file to transmit over an MPTCP connection
+ local name=$1
+ local ksize=1
+
+ dd if=/dev/urandom of="$name" bs=2 count=$ksize 2> /dev/null
+ echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
+}
+
+make_connection()
+{
+ local file
+ file=$(mktemp)
+ make_file "$file" "client"
+
+ local is_v6=$1
+ local app_port=$app4_port
+ local connect_addr="10.0.1.1"
+ local listen_addr="0.0.0.0"
+ if [ "$is_v6" = "v6" ]
+ then
+ connect_addr="dead:beef:1::1"
+ listen_addr="::"
+ app_port=$app6_port
+ else
+ is_v6="v4"
+ fi
+
+ # Capture netlink events over the two network namespaces running
+ # the MPTCP client and server
+ local client_evts
+ client_evts=$(mktemp)
+ :>"$client_evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$client_evts" 2>&1 &
+ local client_evts_pid=$!
+ local server_evts
+ server_evts=$(mktemp)
+ :>"$server_evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$server_evts" 2>&1 &
+ local server_evts_pid=$!
+ sleep 0.5
+
+ # Run the server
+ ip netns exec "$ns1" \
+ ./mptcp_connect -s MPTCP -w 300 -p $app_port -l $listen_addr > /dev/null 2>&1 &
+ local server_pid=$!
+ sleep 0.5
+
+ # Run the client, transfer $file and stay connected to the server
+ # to conduct tests
+ ip netns exec "$ns2" \
+ ./mptcp_connect -s MPTCP -w 300 -m sendfile -p $app_port $connect_addr\
+ 2>&1 > /dev/null < "$file" &
+ local client_pid=$!
+ sleep 1
+
+ # Capture client/server attributes from MPTCP connection netlink events
+ kill_wait $client_evts_pid
+
+ local client_token
+ local client_port
+ local client_serverside
+ local server_token
+ local server_serverside
+
+ client_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ client_port=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+ client_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
+ "$client_evts")
+ kill_wait $server_evts_pid
+ server_token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$server_evts")
+ server_serverside=$(sed --unbuffered -n 's/.*\(server_side:\)\([[:digit:]]*\).*$/\2/p;q'\
+ "$server_evts")
+ rm -f "$client_evts" "$server_evts" "$file"
+
+ if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
+ [ "$server_serverside" = 1 ]
+ then
+ stdbuf -o0 -e0 printf "Established IP%s MPTCP Connection ns2 => ns1 \t\t[OK]\n" $is_v6
+ else
+ exit 1
+ fi
+
+ if [ "$is_v6" = "v6" ]
+ then
+ client6_token=$client_token
+ server6_token=$server_token
+ client6_port=$client_port
+ client6_pid=$client_pid
+ server6_pid=$server_pid
+ else
+ client4_token=$client_token
+ server4_token=$server_token
+ client4_port=$client_port
+ client4_pid=$client_pid
+ server4_pid=$server_pid
+ fi
+}
+
+verify_announce_event()
+{
+ local evt=$1
+ local e_type=$2
+ local e_token=$3
+ local e_addr=$4
+ local e_id=$5
+ local e_dport=$6
+ local e_af=$7
+ local type
+ local token
+ local addr
+ local dport
+ local id
+
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$e_af" = "v6" ]
+ then
+ addr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ else
+ addr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ fi
+ dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
+ [ "$addr" = "$e_addr" ] && [ "$dport" = "$e_dport" ] &&
+ [ "$id" = "$e_id" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+}
+
+test_announce()
+{
+ local evts
+ evts=$(mktemp)
+ # Capture events on the network namespace running the server
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # ADD_ADDR using an invalid token should result in no action
+ local invalid_token=$(( client4_token - 1))
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token $invalid_token id\
+ $client_addr_id dev ns2eth1 > /dev/null 2>&1
+
+ local type
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ stdbuf -o0 -e0 printf "ADD_ADDR 10.0.2.2 (ns2) => ns1, invalid token \t\t"
+ if [ "$type" = "" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ else
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+ fi
+
+ # ADD_ADDR from the client to server machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns2"\
+ ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
+ ns2eth1 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.2 (ns2) => ns1, reuse port \t\t" $client_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$server4_token" "10.0.2.2" "$client_addr_id"\
+ "$client4_port"
+
+ # ADD_ADDR6 from the client to server machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl ann\
+ dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR6 id:%d dead:beef:2::2 (ns2) => ns1, reuse port\t\t" $client_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
+ "$client_addr_id" "$client6_port" "v6"
+
+ # ADD_ADDR from the client to server machine using a new port
+ :>"$evts"
+ client_addr_id=$((client_addr_id+1))
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ $client_addr_id dev ns2eth1 port $new4_port > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.2 (ns2) => ns1, new port \t\t\t" $client_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
+ "$client_addr_id" "$new4_port"
+
+ kill_wait $evts_pid
+
+ # Capture events on the network namespace running the client
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # ADD_ADDR from the server to client machine reusing the subflow port
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id dev ns1eth2 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.1 (ns1) => ns2, reuse port \t\t" $server_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ "$server_addr_id" "$app4_port"
+
+ # ADD_ADDR6 from the server to client machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
+ $server_addr_id dev ns1eth2 > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR6 id:%d dead:beef:2::1 (ns1) => ns2, reuse port\t\t" $server_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
+ "$server_addr_id" "$app6_port" "v6"
+
+ # ADD_ADDR from the server to client machine using a new port
+ :>"$evts"
+ server_addr_id=$((server_addr_id+1))
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id dev ns1eth2 port $new4_port > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "ADD_ADDR id:%d 10.0.2.1 (ns1) => ns2, new port \t\t\t" $server_addr_id
+ sleep 0.5
+ verify_announce_event "$evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
+ "$server_addr_id" "$new4_port"
+
+ kill_wait $evts_pid
+ rm -f "$evts"
+}
+
+verify_remove_event()
+{
+ local evt=$1
+ local e_type=$2
+ local e_token=$3
+ local e_id=$4
+ local type
+ local token
+ local id
+
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ id=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
+ [ "$id" = "$e_id" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+}
+
+test_remove()
+{
+ local evts
+ evts=$(mktemp)
+
+ # Capture events on the network namespace running the server
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # RM_ADDR using an invalid token should result in no action
+ local invalid_token=$(( client4_token - 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1, invalid token \t"\
+ $client_addr_id
+ local type
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ if [ "$type" = "" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ else
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ fi
+
+ # RM_ADDR using an invalid addr id should result in no action
+ local invalid_id=$(( client_addr_id + 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $invalid_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1, invalid id \t"\
+ $invalid_id
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+ if [ "$type" = "" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ else
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ fi
+
+ # RM_ADDR from the client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1 \t"\
+ $client_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$server4_token" "$client_addr_id"
+
+ # RM_ADDR from the client to server machine
+ :>"$evts"
+ client_addr_id=$(( client_addr_id - 1 ))
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns2 => ns1 \t"\
+ $client_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$server4_token" "$client_addr_id"
+
+ # RM_ADDR6 from the client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
+ $client_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR6 id:%d ns2 => ns1 \t"\
+ $client_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$server6_token" "$client_addr_id"
+
+ kill_wait $evts_pid
+
+ # Capture events on the network namespace running the client
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # RM_ADDR from the server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ $server_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns1 => ns2 \t"\
+ $server_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$client4_token" "$server_addr_id"
+
+ # RM_ADDR from the server to client machine
+ :>"$evts"
+ server_addr_id=$(( server_addr_id - 1 ))
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
+ $server_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR id:%d ns1 => ns2 \t" $server_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$client4_token" "$server_addr_id"
+
+ # RM_ADDR6 from the server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
+ $server_addr_id > /dev/null 2>&1
+ stdbuf -o0 -e0 printf "RM_ADDR6 id:%d ns1 => ns2 \t" $server_addr_id
+ sleep 0.5
+ verify_remove_event "$evts" "$REMOVED" "$client6_token" "$server_addr_id"
+
+ kill_wait $evts_pid
+ rm -f "$evts"
+}
+
+verify_subflow_events()
+{
+ local evt=$1
+ local e_type=$2
+ local e_token=$3
+ local e_family=$4
+ local e_saddr=$5
+ local e_daddr=$6
+ local e_dport=$7
+ local e_locid=$8
+ local e_remid=$9
+ shift 2
+ local e_from=$8
+ local e_to=$9
+ local type
+ local token
+ local family
+ local saddr
+ local daddr
+ local dport
+ local locid
+ local remid
+
+ if [ "$e_type" = "$SUB_ESTABLISHED" ]
+ then
+ if [ "$e_family" = "$AF_INET6" ]
+ then
+ stdbuf -o0 -e0 printf "CREATE_SUBFLOW6 %s (%s) => %s (%s) "\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ else
+ stdbuf -o0 -e0 printf "CREATE_SUBFLOW %s (%s) => %s (%s) \t"\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ fi
+ else
+ if [ "$e_family" = "$AF_INET6" ]
+ then
+ stdbuf -o0 -e0 printf "DESTROY_SUBFLOW6 %s (%s) => %s (%s) "\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ else
+ stdbuf -o0 -e0 printf "DESTROY_SUBFLOW %s (%s) => %s (%s) \t"\
+ "$e_saddr" "$e_from" "$e_daddr" "$e_to"
+ fi
+ fi
+
+ type=$(sed --unbuffered -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ token=$(sed --unbuffered -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ family=$(sed --unbuffered -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ dport=$(sed --unbuffered -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ locid=$(sed --unbuffered -n 's/.*\(loc_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ remid=$(sed --unbuffered -n 's/.*\(rem_id:\)\([[:digit:]]*\).*$/\2/p;q' "$evt")
+ if [ "$family" = "$AF_INET6" ]
+ then
+ saddr=$(sed --unbuffered -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ daddr=$(sed --unbuffered -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q' "$evt")
+ else
+ saddr=$(sed --unbuffered -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ daddr=$(sed --unbuffered -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evt")
+ fi
+
+ if [ "$type" = "$e_type" ] && [ "$token" = "$e_token" ] &&
+ [ "$daddr" = "$e_daddr" ] && [ "$e_dport" = "$dport" ] &&
+ [ "$family" = "$e_family" ] && [ "$saddr" = "$e_saddr" ] &&
+ [ "$e_locid" = "$locid" ] && [ "$e_remid" = "$remid" ]
+ then
+ stdbuf -o0 -e0 printf "[OK]\n"
+ return 0
+ fi
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+}
+
+test_subflows()
+{
+ local evts
+ evts=$(mktemp)
+ # Capture events on the network namespace running the server
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.2:<subflow-port>
+ ip netns exec "$ns2" ./pm_nl_ctl listen 10.0.2.2\
+ "$client4_port" > /dev/null 2>&1 &
+ local listener_pid=$!
+
+ # ADD_ADDR from client to server machine reusing the subflow port
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ $client_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2\
+ rport "$client4_port" token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ "10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+ kill_wait $listener_pid
+
+ local sport
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl dsf lip 10.0.2.1 lport "$sport" rip 10.0.2.2 rport\
+ "$client4_port" token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ "10.0.2.2" "$client4_port" "23" "$client_addr_id" "ns1" "ns2"
+
+ # RM_ADDR from client to server machine
+ ip netns exec "$ns2" ./pm_nl_ctl rem id $client_addr_id token\
+ "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at dead:beef:2::2:<subflow-port>
+ ip netns exec "$ns2" ./pm_nl_ctl listen dead:beef:2::2\
+ "$client6_port" > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR6 from client to server machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl ann dead:beef:2::2 token "$client6_token" id\
+ $client_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW6 from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl csf lip dead:beef:2::1 lid 23 rip\
+ dead:beef:2::2 rport "$client6_port" token "$server6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server6_token" "$AF_INET6"\
+ "dead:beef:2::1" "dead:beef:2::2" "$client6_port" "23"\
+ "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+ kill_wait $listener_pid
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW6 from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl dsf lip dead:beef:2::1 lport "$sport" rip\
+ dead:beef:2::2 rport "$client6_port" token "$server6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$server6_token" "$AF_INET6"\
+ "dead:beef:2::1" "dead:beef:2::2" "$client6_port" "23"\
+ "$client_addr_id" "ns1" "ns2"
+
+ # RM_ADDR from client to server machine
+ ip netns exec "$ns2" ./pm_nl_ctl rem id $client_addr_id token\
+ "$client6_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.2:<new-port>
+ ip netns exec "$ns2" ./pm_nl_ctl listen 10.0.2.2\
+ $new4_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR from client to server machine using a new port
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
+ $client_addr_id port $new4_port > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl csf lip 10.0.2.1 lid 23 rip 10.0.2.2 rport\
+ $new4_port token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$server4_token" "$AF_INET"\
+ "10.0.2.1" "10.0.2.2" "$new4_port" "23"\
+ "$client_addr_id" "ns1" "ns2"
+
+ # Delete the listener from the client ns, if one was created
+ kill_wait $listener_pid
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from server to client machine
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl dsf lip 10.0.2.1 lport "$sport" rip 10.0.2.2 rport\
+ $new4_port token "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$server4_token" "$AF_INET" "10.0.2.1"\
+ "10.0.2.2" "$new4_port" "23" "$client_addr_id" "ns1" "ns2"
+
+ # RM_ADDR from client to server machine
+ ip netns exec "$ns2" ./pm_nl_ctl rem id $client_addr_id token\
+ "$client4_token" > /dev/null 2>&1
+
+ kill_wait $evts_pid
+
+ # Capture events on the network namespace running the client
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl events >> "$evts" 2>&1 &
+ evts_pid=$!
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.1:<subflow-port>
+ ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
+ $app4_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR from server to client machine reusing the subflow port
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
+ $app4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ "10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+ kill_wait $listener_pid
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
+ $app4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ "10.0.2.1" "$app4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # RM_ADDR from server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+ "$server4_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at dead:beef:2::1:<subflow-port>
+ ip netns exec "$ns1" ./pm_nl_ctl listen dead:beef:2::1\
+ $app6_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR6 from server to client machine reusing the subflow port
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
+ $server_addr_id > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW6 from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl csf lip dead:beef:2::2 lid 23 rip\
+ dead:beef:2::1 rport $app6_port token "$client6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client6_token"\
+ "$AF_INET6" "dead:beef:2::2"\
+ "dead:beef:2::1" "$app6_port" "23"\
+ "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+ kill_wait $listener_pid
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW6 from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl dsf lip dead:beef:2::2 lport "$sport" rip\
+ dead:beef:2::1 rport $app6_port token "$client6_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$client6_token" "$AF_INET6" "dead:beef:2::2"\
+ "dead:beef:2::1" "$app6_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # RM_ADDR6 from server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+ "$server6_token" > /dev/null 2>&1
+ sleep 0.5
+
+ # Attempt to add a listener at 10.0.2.1:<new-port>
+ ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
+ $new4_port > /dev/null 2>&1 &
+ listener_pid=$!
+
+ # ADD_ADDR from server to client machine using a new port
+ :>"$evts"
+ ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
+ $server_addr_id port $new4_port > /dev/null 2>&1
+ sleep 0.5
+
+ # CREATE_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
+ $new4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_ESTABLISHED" "$client4_token" "$AF_INET"\
+ "10.0.2.2" "10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # Delete the listener from the server ns, if one was created
+ kill_wait $listener_pid
+
+ sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts")
+
+ # DESTROY_SUBFLOW from client to server machine
+ :>"$evts"
+ ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
+ $new4_port token "$client4_token" > /dev/null 2>&1
+ sleep 0.5
+ verify_subflow_events "$evts" "$SUB_CLOSED" "$client4_token" "$AF_INET" "10.0.2.2"\
+ "10.0.2.1" "$new4_port" "23" "$server_addr_id" "ns2" "ns1"
+
+ # RM_ADDR from server to client machine
+ ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+ "$server4_token" > /dev/null 2>&1
+
+ kill_wait $evts_pid
+ rm -f "$evts"
+}
+
+test_prio()
+{
+ local count
+
+ # Send MP_PRIO signal from client to server machine
+ ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$server4_port"
+ sleep 0.5
+
+ # Check TX
+ stdbuf -o0 -e0 printf "MP_PRIO TX \t"
+ count=$(ip netns exec "$ns2" nstat -as | grep MPTcpExtMPPrioTx | awk '{print $2}')
+ [ -z "$count" ] && count=0
+ if [ $count != 1 ]; then
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+ else
+ stdbuf -o0 -e0 printf "[OK]\n"
+ fi
+
+ # Check RX
+ stdbuf -o0 -e0 printf "MP_PRIO RX \t"
+ count=$(ip netns exec "$ns1" nstat -as | grep MPTcpExtMPPrioRx | awk '{print $2}')
+ [ -z "$count" ] && count=0
+ if [ $count != 1 ]; then
+ stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
+ else
+ stdbuf -o0 -e0 printf "[OK]\n"
+ fi
+}
+
+make_connection
+make_connection "v6"
+test_announce
+test_remove
+test_subflows
+test_prio
+
+exit 0
diff --git a/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
new file mode 100755
index 000000000000..86e621b7b9c7
--- /dev/null
+++ b/tools/testing/selftests/net/ndisc_unsolicited_na_test.sh
@@ -0,0 +1,254 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for the accept_untracked_na feature, which
+# enables RFC 9131 behaviour. The following is the test matrix.
+#      drop    accept  fwding  behaviour
+#      ----    ------  ------  ----------------------------------------------
+#       1        X       X     Don't update NC
+#       0        0       X     Don't update NC
+#       0        1       0     Don't update NC
+#       0        1       1     Add a STALE NC entry
+
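+# For reference, the "accept" row of the matrix corresponds to setting,
+# on the router interface used below:
+#   sysctl -w net.ipv6.conf.veth-router.drop_unsolicited_na=0
+#   sysctl -w net.ipv6.conf.veth-router.accept_untracked_na=1
+#   sysctl -w net.ipv6.conf.veth-router.forwarding=1
+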
+ret=0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+PAUSE_ON_FAIL=no
+PAUSE=no
+
+HOST_NS="ns-host"
+ROUTER_NS="ns-router"
+
+HOST_INTF="veth-host"
+ROUTER_INTF="veth-router"
+
+ROUTER_ADDR="2000:20::1"
+HOST_ADDR="2000:20::2"
+SUBNET_WIDTH=64
+ROUTER_ADDR_WITH_MASK="${ROUTER_ADDR}/${SUBNET_WIDTH}"
+HOST_ADDR_WITH_MASK="${HOST_ADDR}/${SUBNET_WIDTH}"
+
+IP_HOST="ip -6 -netns ${HOST_NS}"
+IP_HOST_EXEC="ip netns exec ${HOST_NS}"
+IP_ROUTER="ip -6 -netns ${ROUTER_NS}"
+IP_ROUTER_EXEC="ip netns exec ${ROUTER_NS}"
+
+tcpdump_stdout=
+tcpdump_stderr=
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ printf " TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf " TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+
+ if [ "${PAUSE}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+}
+
+setup()
+{
+ set -e
+
+ local drop_unsolicited_na=$1
+ local accept_untracked_na=$2
+ local forwarding=$3
+
+ # Set up two namespaces and a veth tunnel across them.
+ # One end of the tunnel is a router and the other end is a host.
+ ip netns add ${HOST_NS}
+ ip netns add ${ROUTER_NS}
+ ${IP_ROUTER} link add ${ROUTER_INTF} type veth \
+ peer name ${HOST_INTF} netns ${HOST_NS}
+
+ # Enable IPv6 on both router and host, and configure static addresses.
+ # The router here is the DUT
+ # Setup router configuration as specified by the arguments.
+ # forwarding=0 case is to check that a non-router
+ # doesn't add neighbour entries.
+ ROUTER_CONF=net.ipv6.conf.${ROUTER_INTF}
+ ${IP_ROUTER_EXEC} sysctl -qw \
+ ${ROUTER_CONF}.forwarding=${forwarding}
+ ${IP_ROUTER_EXEC} sysctl -qw \
+ ${ROUTER_CONF}.drop_unsolicited_na=${drop_unsolicited_na}
+ ${IP_ROUTER_EXEC} sysctl -qw \
+ ${ROUTER_CONF}.accept_untracked_na=${accept_untracked_na}
+ ${IP_ROUTER_EXEC} sysctl -qw ${ROUTER_CONF}.disable_ipv6=0
+ ${IP_ROUTER} addr add ${ROUTER_ADDR_WITH_MASK} dev ${ROUTER_INTF}
+
+ # Turn on ndisc_notify on host interface so that
+ # the host sends unsolicited NAs.
+ HOST_CONF=net.ipv6.conf.${HOST_INTF}
+ ${IP_HOST_EXEC} sysctl -qw ${HOST_CONF}.ndisc_notify=1
+ ${IP_HOST_EXEC} sysctl -qw ${HOST_CONF}.disable_ipv6=0
+ ${IP_HOST} addr add ${HOST_ADDR_WITH_MASK} dev ${HOST_INTF}
+
+ set +e
+}
+
+start_tcpdump() {
+ set -e
+ tcpdump_stdout=`mktemp`
+ tcpdump_stderr=`mktemp`
+ ${IP_ROUTER_EXEC} timeout 15s \
+ tcpdump --immediate-mode -tpni ${ROUTER_INTF} -c 1 \
+ "icmp6 && icmp6[0] == 136 && src ${HOST_ADDR}" \
+ > ${tcpdump_stdout} 2> ${tcpdump_stderr}
+ set +e
+}
+
+cleanup_tcpdump()
+{
+ set -e
+ [[ ! -z ${tcpdump_stdout} ]] && rm -f ${tcpdump_stdout}
+ [[ ! -z ${tcpdump_stderr} ]] && rm -f ${tcpdump_stderr}
+ tcpdump_stdout=
+ tcpdump_stderr=
+ set +e
+}
+
+cleanup()
+{
+ cleanup_tcpdump
+ ip netns del ${HOST_NS}
+ ip netns del ${ROUTER_NS}
+}
+
+link_up() {
+ set -e
+ ${IP_ROUTER} link set dev ${ROUTER_INTF} up
+ ${IP_HOST} link set dev ${HOST_INTF} up
+ set +e
+}
+
+verify_ndisc() {
+ local drop_unsolicited_na=$1
+ local accept_untracked_na=$2
+ local forwarding=$3
+
+ neigh_show_output=$(${IP_ROUTER} neigh show \
+ to ${HOST_ADDR} dev ${ROUTER_INTF} nud stale)
+ if [ ${drop_unsolicited_na} -eq 0 ] && \
+ [ ${accept_untracked_na} -eq 1 ] && \
+ [ ${forwarding} -eq 1 ]; then
+ # Neighbour entry expected to be present for 011 case
+ [[ ${neigh_show_output} ]]
+ else
+ # Neighbour entry expected to be absent for all other cases
+ [[ -z ${neigh_show_output} ]]
+ fi
+}
+
+test_unsolicited_na_common()
+{
+ # Setup the test bed, but keep links down
+ setup $1 $2 $3
+
+ # Bring the link up, wait for the NA,
+ # and add a delay to ensure neighbour processing is done.
+ link_up
+ start_tcpdump
+
+ # Verify the neighbour table
+ verify_ndisc $1 $2 $3
+
+}
+
+test_unsolicited_na_combination() {
+ test_unsolicited_na_common $1 $2 $3
+ # capture the result now: the test_msg array assignment below resets $?
+ local rc=$?
+ test_msg=("test_unsolicited_na: "
+ "drop_unsolicited_na=$1 "
+ "accept_untracked_na=$2 "
+ "forwarding=$3")
+ log_test $rc 0 "${test_msg[*]}"
+ cleanup
+}
+
+test_unsolicited_na_combinations() {
+ # Args: drop_unsolicited_na accept_untracked_na forwarding
+
+ # Expect entry
+ test_unsolicited_na_combination 0 1 1
+
+ # Expect no entry
+ test_unsolicited_na_combination 0 0 0
+ test_unsolicited_na_combination 0 0 1
+ test_unsolicited_na_combination 0 1 0
+ test_unsolicited_na_combination 1 0 0
+ test_unsolicited_na_combination 1 0 1
+ test_unsolicited_na_combination 1 1 0
+ test_unsolicited_na_combination 1 1 1
+}
+
+###############################################################################
+# usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+ -p Pause on fail
+ -P Pause after each test before cleanup
+EOF
+}
+
+###############################################################################
+# main
+
+while getopts :pPh o
+do
+ case $o in
+ p) PAUSE_ON_FAIL=yes;;
+ P) PAUSE=yes;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+# make sure we don't pause twice
+[ "${PAUSE}" = "yes" ] && PAUSE_ON_FAIL=no
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip;
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v tcpdump)" ]; then
+ echo "SKIP: Could not run test without tcpdump tool"
+ exit $ksft_skip
+fi
+
+# start clean
+cleanup &> /dev/null
+
+test_unsolicited_na_combinations
+
+printf "\nTests passed: %3d\n" ${nsuccess}
+printf "Tests failed: %3d\n" ${nfail}
+
+exit $ret
diff --git a/tools/testing/selftests/net/psock_snd.c b/tools/testing/selftests/net/psock_snd.c
index 7d15e10a9fb6..edf1e6f80d41 100644
--- a/tools/testing/selftests/net/psock_snd.c
+++ b/tools/testing/selftests/net/psock_snd.c
@@ -389,6 +389,8 @@ int main(int argc, char **argv)
error(1, errno, "ip link set mtu");
if (system("ip addr add dev lo 172.17.0.1/24"))
error(1, errno, "ip addr add");
+ if (system("sysctl -w net.ipv4.conf.lo.accept_local=1"))
+ error(1, errno, "sysctl lo.accept_local");
run_test();
diff --git a/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh b/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
new file mode 100755
index 000000000000..28a775654b92
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh
@@ -0,0 +1,879 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+#
+# This script is designed for testing the SRv6 H.Encaps.Red behavior.
+#
+# Below is depicted the IPv6 network of an operator which offers advanced
+# IPv4/IPv6 VPN services to hosts, enabling them to communicate with each
+# other.
+# In this example, hosts hs-1 and hs-2 are connected through an IPv4/IPv6 VPN
+# service, while hs-3 and hs-4 are connected using an IPv6 only VPN.
+#
+# Routers rt-1,rt-2,rt-3 and rt-4 implement IPv4/IPv6 L3 VPN services
+# leveraging the SRv6 architecture. The key components for such VPNs are:
+#
+# i) The SRv6 H.Encaps.Red behavior applies SRv6 Policies on traffic received
+# by connected hosts, initiating the VPN tunnel. Such a behavior is an
+# optimization of the SRv6 H.Encap aiming to reduce the length of the SID
+# List carried in the pushed SRH. Specifically, the H.Encaps.Red removes
+# the first SID contained in the SID List (i.e. SRv6 Policy) by storing it
+# into the IPv6 Destination Address. When a SRv6 Policy is made of only one
+# SID, the SRv6 H.Encaps.Red behavior omits the SRH at all and pushes that
+# SID directly into the IPv6 DA;
+#
+# ii) The SRv6 End behavior advances the active SID in the SID List carried by
+# the SRH;
+#
+# iii) The SRv6 End.DT46 behavior is used for removing the SRv6 Policy and,
+# thus, it terminates the VPN tunnel. Such a behavior is capable of
+# handling, at the same time, both tunneled IPv4 and IPv6 traffic.
+#
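+# As a concrete sketch, an H.Encaps.Red policy such as (i.a) below can be
+# attached to a route with iproute2 (the device name here is illustrative):
+#
+#   ip -6 route add cafe::2 encap seg6 mode encap.red \
+#           segs fcff:3::e,fcff:4::e,fcff:2::d46 dev veth0
+#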
+#
+#              cafe::1                      cafe::2
+#             10.0.0.1                     10.0.0.2
+#             +--------+                   +--------+
+#             |        |                   |        |
+#             |  hs-1  |                   |  hs-2  |
+#             |        |                   |        |
+#             +---+----+                   +--- +---+
+#    cafe::/64    |                             |      cafe::/64
+#  10.0.0.0/24    |                             |    10.0.0.0/24
+#             +---+----+                   +----+---+
+#             |        |  fcf0:0:1:2::/64  |        |
+#             |  rt-1  +-------------------+  rt-2  |
+#             |        |                   |        |
+#             +---+----+                   +----+---+
+#                 |      .                 .   |
+#                 |  fcf0:0:1:3::/64    .      |
+#                 |         .       .          |
+#                 |            .       .       |
+# fcf0:0:1:4::/64 |           .                | fcf0:0:2:3::/64
+#                 |       .           .        |
+#                 |    .                  .    |
+#                 |  fcf0:0:2:4::/64        .  |
+#                 | .                        . |
+#             +---+----+                   +----+---+
+#             |        |                   |        |
+#             |  rt-4  +-------------------+  rt-3  |
+#             |        |  fcf0:0:3:4::/64  |        |
+#             +---+----+                   +----+---+
+#    cafe::/64    |                             |   cafe::/64
+#  10.0.0.0/24    |                             | 10.0.0.0/24
+#             +---+----+                   +--- +---+
+#             |        |                   |        |
+#             |  hs-4  |                   |  hs-3  |
+#             |        |                   |        |
+#             +--------+                   +--------+
+#              cafe::4                      cafe::3
+#             10.0.0.4                     10.0.0.3
+#
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y
+# in the IPv6 operator network.
+#
+# Local SID table
+# ===============
+#
+# Each SRv6 router is configured with a Local SID table in which SIDs are
+# stored. Considering the given SRv6 router rt-x, at least two SIDs are
+# configured in the Local SID table:
+#
+#   Local SID table for SRv6 router rt-x
+#   +----------------------------------------------------------+
+#   |fcff:x::e is associated with the SRv6 End behavior        |
+#   |fcff:x::d46 is associated with the SRv6 End.DT46 behavior |
+#   +----------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved by the operator for implementing SRv6 VPN
+# services. Reachability of SIDs is ensured by proper configuration of the IPv6
+# operator's network and SRv6 routers.
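+#
+# As a sketch, rt-1's two SIDs map to seg6local routes of the following
+# shape (the device name is illustrative):
+#
+#   ip -6 route add fcff:1::e encap seg6local action End dev veth0
+#   ip -6 route add fcff:1::d46 encap seg6local action End.DT46 \
+#           vrftable 100 dev veth0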
+#
+# SRv6 Policies
+# ===============
+#
+# An SRv6 ingress router applies SRv6 policies to the traffic received from a
+# connected host. SRv6 policy enforcement consists of encapsulating the
+# received traffic into a new IPv6 packet with a given SID List contained in
+# the SRH.
+#
+# IPv4/IPv6 VPN between hs-1 and hs-2
+# -----------------------------------
+#
+# Hosts hs-1 and hs-2 are connected using dedicated IPv4/IPv6 VPNs.
+# Specifically, packets generated from hs-1 and directed towards hs-2 are
+# handled by rt-1 which applies the following SRv6 Policies:
+#
+# i.a) IPv6 traffic, SID List=fcff:3::e,fcff:4::e,fcff:2::d46
+# ii.a) IPv4 traffic, SID List=fcff:2::d46
+#
+# Policy (i.a) steers tunneled IPv6 traffic through SRv6 routers
+# rt-3,rt-4,rt-2. Instead, Policy (ii.a) steers tunneled IPv4 traffic through
+# rt-2.
+# The H.Encaps.Red reduces the SID List (i.a) carried in the SRH by removing
+# the first SID (fcff:3::e) and pushing it into the IPv6 DA. In case of IPv4
+# traffic, the H.Encaps.Red omits the SRH altogether, since the SID
+# List (ii.a) consists of only one SID that can be stored directly in the IPv6
+# DA.
+#
+# On the reverse path (i.e. from hs-2 to hs-1), rt-2 applies the following
+# policies:
+#
+# i.b) IPv6 traffic, SID List=fcff:1::d46
+# ii.b) IPv4 traffic, SID List=fcff:4::e,fcff:3::e,fcff:1::d46
+#
+# Policy (i.b) steers tunneled IPv6 traffic through the SRv6 router rt-1.
+# Conversely, Policy (ii.b) steers tunneled IPv4 traffic through SRv6 routers
+# rt-4,rt-3,rt-1.
+# The H.Encaps.Red omits the SRH altogether in case of (i.b) by pushing the
+# single SID (fcff:1::d46) inside the IPv6 DA.
+# The H.Encaps.Red reduces the SID List (ii.b) in the SRH by removing the first
+# SID (fcff:4::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+# hs-1->hs-2 |IPv6 DA=fcff:3::e|SRH SIDs=fcff:4::e,fcff:2::d46|IPv6|...| (i.a)
+# hs-1->hs-2 |IPv6 DA=fcff:2::d46|IPv4|...| (ii.a)
+#
+# hs-2->hs-1 |IPv6 DA=fcff:1::d46|IPv6|...| (i.b)
+# hs-2->hs-1 |IPv6 DA=fcff:4::e|SRH SIDs=fcff:3::e,fcff:1::d46|IPv4|...| (ii.b)
+#
+#
+# IPv6 VPN between hs-3 and hs-4
+# ------------------------------
+#
+# Hosts hs-3 and hs-4 are connected using a dedicated IPv6 only VPN.
+# Specifically, packets generated from hs-3 and directed towards hs-4 are
+# handled by rt-3 which applies the following SRv6 Policy:
+#
+# i.c) IPv6 traffic, SID List=fcff:2::e,fcff:4::d46
+#
+# Policy (i.c) steers tunneled IPv6 traffic through SRv6 routers rt-2,rt-4.
+# The H.Encaps.Red reduces the SID List (i.c) carried in the SRH by removing
+# the first SID (fcff:2::e) and pushing it into the IPv6 DA.
+#
+# On the reverse path (i.e. from hs-4 to hs-3) the router rt-4 applies the
+# following SRv6 Policy:
+#
+# i.d) IPv6 traffic, SID List=fcff:1::e,fcff:3::d46.
+#
+# Policy (i.d) steers tunneled IPv6 traffic through SRv6 routers rt-1,rt-3.
+# The H.Encaps.Red reduces the SID List (i.d) carried in the SRH by removing
+# the first SID (fcff:1::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+# hs-3->hs-4 |IPv6 DA=fcff:2::e|SRH SIDs=fcff:4::d46|IPv6|...| (i.c)
+# hs-4->hs-3 |IPv6 DA=fcff:1::e|SRH SIDs=fcff:3::d46|IPv6|...| (i.d)
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly VRF_TID=100
+readonly VRF_DEVNAME="vrf-${VRF_TID}"
+readonly RT2HS_DEVNAME="veth-t${VRF_TID}"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fcff
+readonly END_FUNC=000e
+readonly DT46_FUNC=0d46
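+
+# As an illustration of how the SIDs above are composed
+# (locator:router-id::function): rt-1's SRv6 End SID is fcff:1::e (locator
+# fcff, router id 1, function 000e) and its SRv6 End.DT46 SID is
+# fcff:1::d46 (function 0d46).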
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+ local rc="$1"
+ local expected="$2"
+ local msg="$3"
+
+ if [ "${rc}" -eq "${expected}" ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ printf "\nTests passed: %3d\n" "${nsuccess}"
+ printf "Tests failed: %3d\n" "${nfail}"
+
+	# when a test fails, 'ret' is set to 1 (error code). Conversely,
+	# when all tests pass, 'ret' is set to 0 (success code).
+ if [ "${ret}" -ne 1 ]; then
+ ret=0
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+ local cmd="$1"
+
+ if [ ! -x "$(command -v "${cmd}")" ]; then
+ echo "SKIP: Could not run test without \"${cmd}\" tool";
+ exit "${ksft_skip}"
+ fi
+}
+
+get_nodename()
+{
+ local name="$1"
+
+ echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+ local rtid="$1"
+
+ get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+ local hsid="$1"
+
+ get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+ local name="$1"
+
+ ip netns add "${name}"
+}
+
+create_router()
+{
+ local rtid="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rtid}")"
+
+ __create_namespace "${nsname}"
+}
+
+create_host()
+{
+ local hsid="$1"
+ local nsname
+
+ nsname="$(get_hsname "${hsid}")"
+
+ __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+ local nsname
+ local i
+
+ # destroy routers
+ for i in ${ROUTERS}; do
+ nsname="$(get_rtname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # destroy hosts
+ for i in ${HOSTS}; do
+ nsname="$(get_hsname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # check whether the setup phase was completed successfully or not. In
+ # case of an error during the setup phase of the testing environment,
+ # the selftest is considered as "skipped".
+ if [ "${SETUP_ERR}" -ne 0 ]; then
+ echo "SKIP: Setting up the testing environment failed"
+ exit "${ksft_skip}"
+ fi
+
+ exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local neigh
+ local nsname
+ local neigh_nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ neigh_nsname="$(get_rtname "${neigh}")"
+
+ ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+ type veth peer name "veth-rt-${neigh}-${rt}" \
+ netns "${neigh_nsname}"
+ done
+}
+
+get_network_prefix()
+{
+ local rt="$1"
+ local neigh="$2"
+ local p="${rt}"
+ local q="${neigh}"
+
+ if [ "${p}" -gt "${q}" ]; then
+ p="${q}"; q="${rt}"
+ fi
+
+ echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
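+
+# A quick usage sketch (not part of the test flow): both
+# "get_network_prefix 1 2" and "get_network_prefix 2 1" echo "fcf0:0:1:2",
+# since the lower router ID always comes first in the prefix.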
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local nsname
+ local net_prefix
+ local devname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ ip -netns "${nsname}" addr \
+ add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+ ip -netns "${nsname}" link set "${devname}" up
+ done
+
+ ip -netns "${nsname}" link set lo up
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.ip_forward=1
+}
+
+# Setup local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local net_prefix
+ local devname
+ local nsname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ # set underlay network routes for SIDs reachability
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${neigh}::/32" \
+ table "${LOCALSID_TABLE_ID}" \
+ via "${net_prefix}::${neigh}" dev "${devname}"
+ done
+
+	# Local End behavior (note that the "dev" argument is just a
+	# placeholder here; the VRF device is used for the sake of
+	# simplicity).
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${END_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End dev "${VRF_DEVNAME}"
+
+ # Local End.DT46 behavior
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${DT46_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.DT46 vrftable "${VRF_TID}" \
+ dev "${VRF_DEVNAME}"
+
+	# all SIDs for VPNs start with a common locator. Routes and SRv6
+	# Endpoint behavior instances are grouped together in the 'localsid'
+	# table.
+ ip -netns "${nsname}" -6 rule \
+ add to "${VPN_LOCATOR_SERVICE}::/16" \
+ lookup "${LOCALSID_TABLE_ID}" prio 999
+
+ # set default routes to unreachable for both ipv4 and ipv6
+ ip -netns "${nsname}" -6 route \
+ add unreachable default metric 4278198272 \
+ vrf "${VRF_DEVNAME}"
+
+ ip -netns "${nsname}" -4 route \
+ add unreachable default metric 4278198272 \
+ vrf "${VRF_DEVNAME}"
+}
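+
+# The resulting 'localsid' table and the matching policy rule can be
+# inspected at runtime (a debugging aid, not used by the tests themselves),
+# e.g. for rt-1:
+#   ip -netns "$(get_rtname 1)" -6 route show table "${LOCALSID_TABLE_ID}"
+#   ip -netns "$(get_rtname 1)" -6 rule show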
+
+# build and install the SRv6 policy into the ingress SRv6 router.
+# args:
+# $1 - destination host (i.e. cafe::x host)
+# $2 - SRv6 router configured for enforcing the SRv6 Policy
+# $3 - SRv6 routers configured for steering traffic (End behaviors)
+# $4 - SRv6 router configured for removing the SRv6 Policy (router connected
+# to the destination host)
+# $5 - encap mode (full or red)
+# $6 - traffic type (IPv6 or IPv4)
+__setup_rt_policy()
+{
+ local dst="$1"
+ local encap_rt="$2"
+ local end_rts="$3"
+ local dec_rt="$4"
+ local mode="$5"
+ local traffic="$6"
+ local nsname
+ local policy=''
+ local n
+
+ nsname="$(get_rtname "${encap_rt}")"
+
+ for n in ${end_rts}; do
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${n}::${END_FUNC},"
+ done
+
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${dec_rt}::${DT46_FUNC}"
+
+ # add SRv6 policy to incoming traffic sent by connected hosts
+ if [ "${traffic}" -eq 6 ]; then
+ ip -netns "${nsname}" -6 route \
+ add "${IPv6_HS_NETWORK}::${dst}" vrf "${VRF_DEVNAME}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+ dev "${VRF_DEVNAME}"
+
+ ip -netns "${nsname}" -6 neigh \
+ add proxy "${IPv6_HS_NETWORK}::${dst}" \
+ dev "${RT2HS_DEVNAME}"
+ else
+		# "dev" must be different from the one where the packet is
+		# received, otherwise proxy ARP does not work.
+ ip -netns "${nsname}" -4 route \
+ add "${IPv4_HS_NETWORK}.${dst}" vrf "${VRF_DEVNAME}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+ dev "${VRF_DEVNAME}"
+ fi
+}
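+
+# As a worked example (mirroring what setup() does below), the call
+# 'setup_rt_policy_ipv6 2 1 "3 4" 2 encap.red' expands the policy to
+# "fcff:3::e,fcff:4::e,fcff:2::d46" and installs, in the namespace of rt-1,
+# the route:
+#   ip -6 route add cafe::2 vrf vrf-100 encap seg6 mode encap.red \
+#       segs fcff:3::e,fcff:4::e,fcff:2::d46 dev vrf-100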
+
+# see __setup_rt_policy
+setup_rt_policy_ipv6()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 6
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv4()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 4
+}
+
+setup_hs()
+{
+ local hs="$1"
+ local rt="$2"
+ local hsname
+ local rtname
+
+ hsname="$(get_hsname "${hs}")"
+ rtname="$(get_rtname "${rt}")"
+
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns "${hsname}" link add veth0 type veth \
+ peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+ ip -netns "${hsname}" addr \
+ add "${IPv6_HS_NETWORK}::${hs}/64" dev veth0 nodad
+ ip -netns "${hsname}" addr add "${IPv4_HS_NETWORK}.${hs}/24" dev veth0
+
+ ip -netns "${hsname}" link set veth0 up
+ ip -netns "${hsname}" link set lo up
+
+ # configure the VRF on the router which is directly connected to the
+ # source host.
+ ip -netns "${rtname}" link \
+ add "${VRF_DEVNAME}" type vrf table "${VRF_TID}"
+ ip -netns "${rtname}" link set "${VRF_DEVNAME}" up
+
+ # enslave the veth interface connecting the router with the host to the
+ # VRF in the access router
+ ip -netns "${rtname}" link \
+ set "${RT2HS_DEVNAME}" master "${VRF_DEVNAME}"
+
+ ip -netns "${rtname}" addr \
+ add "${IPv6_HS_NETWORK}::254/64" dev "${RT2HS_DEVNAME}" nodad
+ ip -netns "${rtname}" addr \
+ add "${IPv4_HS_NETWORK}.254/24" dev "${RT2HS_DEVNAME}"
+
+ ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv6.conf."${RT2HS_DEVNAME}".proxy_ndp=1
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".proxy_arp=1
+
+	# disable rp_filter, otherwise the kernel gets confused about how
+	# to route decapsulated IPv4 packets.
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".rp_filter=0
+
+ ip netns exec "${rtname}" sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+}
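+
+# Note: the proxy_ndp/proxy_arp sysctls set above, together with the proxy
+# neighbor entries added in __setup_rt_policy(), let the access router
+# answer neighbor solicitations on behalf of remote hosts; the proxy
+# entries can be listed with, e.g.:
+#   ip -netns "$(get_rtname 1)" -6 neigh show proxy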
+
+setup()
+{
+ local i
+
+ # create routers
+ ROUTERS="1 2 3 4"; readonly ROUTERS
+ for i in ${ROUTERS}; do
+ create_router "${i}"
+ done
+
+ # create hosts
+ HOSTS="1 2 3 4"; readonly HOSTS
+ for i in ${HOSTS}; do
+ create_host "${i}"
+ done
+
+ # set up the links for connecting routers
+ add_link_rt_pairs 1 "2 3 4"
+ add_link_rt_pairs 2 "3 4"
+ add_link_rt_pairs 3 "4"
+
+ # set up the basic connectivity of routers and routes required for
+ # reachability of SIDs.
+ setup_rt_networking 1 "2 3 4"
+ setup_rt_networking 2 "1 3 4"
+ setup_rt_networking 3 "1 2 4"
+ setup_rt_networking 4 "1 2 3"
+
+ # set up the hosts connected to routers
+ setup_hs 1 1
+ setup_hs 2 2
+ setup_hs 3 3
+ setup_hs 4 4
+
+ # set up default SRv6 Endpoints (i.e. SRv6 End and SRv6 End.DT46)
+ setup_rt_local_sids 1 "2 3 4"
+ setup_rt_local_sids 2 "1 3 4"
+ setup_rt_local_sids 3 "1 2 4"
+ setup_rt_local_sids 4 "1 2 3"
+
+ # set up SRv6 policies
+
+ # create an IPv6 VPN between hosts hs-1 and hs-2.
+ # the network path between hs-1 and hs-2 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-1 -> hs-2 (H.Encaps.Red)
+ # - rt-3,rt-4 (SRv6 End behaviors)
+ # - rt-2 (SRv6 End.DT46 behavior)
+ #
+ # Direction hs-2 -> hs-1 (H.Encaps.Red)
+ # - rt-1 (SRv6 End.DT46 behavior)
+ setup_rt_policy_ipv6 2 1 "3 4" 2 encap.red
+ setup_rt_policy_ipv6 1 2 "" 1 encap.red
+
+ # create an IPv4 VPN between hosts hs-1 and hs-2
+ # the network path between hs-1 and hs-2 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-1 -> hs-2 (H.Encaps.Red)
+ # - rt-2 (SRv6 End.DT46 behavior)
+ #
+ # Direction hs-2 -> hs-1 (H.Encaps.Red)
+ # - rt-4,rt-3 (SRv6 End behaviors)
+ # - rt-1 (SRv6 End.DT46 behavior)
+ setup_rt_policy_ipv4 2 1 "" 2 encap.red
+ setup_rt_policy_ipv4 1 2 "4 3" 1 encap.red
+
+ # create an IPv6 VPN between hosts hs-3 and hs-4
+ # the network path between hs-3 and hs-4 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-3 -> hs-4 (H.Encaps.Red)
+ # - rt-2 (SRv6 End Behavior)
+ # - rt-4 (SRv6 End.DT46 behavior)
+ #
+ # Direction hs-4 -> hs-3 (H.Encaps.Red)
+ # - rt-1 (SRv6 End behavior)
+ # - rt-3 (SRv6 End.DT46 behavior)
+ setup_rt_policy_ipv6 4 3 "2" 4 encap.red
+ setup_rt_policy_ipv6 3 4 "1" 3 encap.red
+
+ # testing environment was set up successfully
+ SETUP_ERR=0
+}
+
+check_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+ local prefix
+ local rtsrc_nsname
+
+ rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+ prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+ ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+
+ check_rt_connectivity "${rtsrc}" "${rtdst}"
+ log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv4_HS_NETWORK}.${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+ local hssrc="$1"
+
+ check_hs_ipv6_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+
+ check_hs_ipv4_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_and_log_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ check_and_log_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+}
+
+check_and_log_hs_ipv6_isolation()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ # in this case, the connectivity test must fail
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 1 "IPv6 Hosts isolation: hs-${hssrc} -X-> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_isolation()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ # in this case, the connectivity test must fail
+ check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 1 "IPv4 Hosts isolation: hs-${hssrc} -X-> hs-${hsdst}"
+}
+
+check_and_log_hs_isolation()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_and_log_hs_ipv6_isolation "${hssrc}" "${hsdst}"
+ check_and_log_hs_ipv4_isolation "${hssrc}" "${hsdst}"
+}
+
+router_tests()
+{
+ local i
+ local j
+
+ log_section "IPv6 routers connectivity test"
+
+ for i in ${ROUTERS}; do
+ for j in ${ROUTERS}; do
+ if [ "${i}" -eq "${j}" ]; then
+ continue
+ fi
+
+ check_and_log_rt_connectivity "${i}" "${j}"
+ done
+ done
+}
+
+host2gateway_tests()
+{
+ local hs
+
+ log_section "IPv4/IPv6 connectivity test among hosts and gateways"
+
+ for hs in ${HOSTS}; do
+ check_and_log_hs2gw_connectivity "${hs}"
+ done
+}
+
+host_vpn_tests()
+{
+	log_section "SRv6 VPN connectivity test among hosts (h1 <-> h2, IPv4/IPv6)"
+
+ check_and_log_hs_connectivity 1 2
+ check_and_log_hs_connectivity 2 1
+
+	log_section "SRv6 VPN connectivity test among hosts (h3 <-> h4, IPv6 only)"
+
+ check_and_log_hs_ipv6_connectivity 3 4
+ check_and_log_hs_ipv6_connectivity 4 3
+}
+
+host_vpn_isolation_tests()
+{
+ local l1="1 2"
+ local l2="3 4"
+ local tmp
+ local i
+ local j
+ local k
+
+ log_section "SRv6 VPN isolation test among hosts"
+
+ for k in 0 1; do
+ for i in ${l1}; do
+ for j in ${l2}; do
+ check_and_log_hs_isolation "${i}" "${j}"
+ done
+ done
+
+ # let us test the reverse path
+ tmp="${l1}"; l1="${l2}"; l2="${tmp}"
+ done
+
+ log_section "SRv6 VPN isolation test among hosts (h2 <-> h4, IPv4 only)"
+
+ check_and_log_hs_ipv4_isolation 2 4
+ check_and_log_hs_ipv4_isolation 4 2
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+ if ! ip route help 2>&1 | grep -qo "encap.red"; then
+ echo "SKIP: Missing SRv6 encap.red support in iproute2"
+ exit "${ksft_skip}"
+ fi
+}
+
+test_vrf_or_ksft_skip()
+{
+ modprobe vrf &>/dev/null || true
+ if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
+ echo "SKIP: vrf sysctl does not exist"
+ exit "${ksft_skip}"
+ fi
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+
+test_iproute2_supp_or_ksft_skip
+test_vrf_or_ksft_skip
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+host_vpn_isolation_tests
+
+print_log_test_results
diff --git a/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh b/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
new file mode 100755
index 000000000000..cb4177d41b21
--- /dev/null
+++ b/tools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh
@@ -0,0 +1,821 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# author: Andrea Mayer <andrea.mayer@uniroma2.it>
+#
+# This script is designed for testing the SRv6 H.L2Encaps.Red behavior.
+#
+# Below is depicted the IPv6 network of an operator which offers L2 VPN
+# services to hosts, enabling them to communicate with each other.
+# In this example, hosts hs-1 and hs-2 are connected through an L2 VPN service.
+# Currently, the SRv6 subsystem in Linux allows hosts hs-1 and hs-2 to exchange
+# full L2 frames as long as they carry IPv4/IPv6.
+#
+# Routers rt-1,rt-2,rt-3 and rt-4 implement L2 VPN services
+# leveraging the SRv6 architecture. The key components for such VPNs are:
+#
+# i) The SRv6 H.L2Encaps.Red behavior applies SRv6 Policies to the traffic
+# received from connected hosts, initiating the VPN tunnel. Such a behavior
+# is an optimization of the SRv6 H.L2Encaps, aiming to reduce the length of
+# the SID List carried in the pushed SRH. Specifically, the H.L2Encaps.Red
+# removes the first SID contained in the SID List (i.e. the SRv6 Policy) by
+# storing it into the IPv6 Destination Address. When an SRv6 Policy is made
+# of only one SID, the SRv6 H.L2Encaps.Red behavior omits the SRH
+# altogether and pushes that SID directly into the IPv6 DA;
+#
+# ii) The SRv6 End behavior advances the active SID in the SID List
+# carried by the SRH;
+#
+# iii) The SRv6 End.DX2 behavior is used for removing the SRv6 Policy,
+# thus terminating the VPN tunnel. The decapsulated L2 frame is sent over
+# the interface connected with the destination host.
+#
+# cafe::1 cafe::2
+# 10.0.0.1 10.0.0.2
+# +--------+ +--------+
+# | | | |
+# | hs-1 | | hs-2 |
+# | | | |
+# +---+----+ +----+---+
+# cafe::/64 | | cafe::/64
+# 10.0.0.0/24 | | 10.0.0.0/24
+# +---+----+ +----+---+
+# | | fcf0:0:1:2::/64 | |
+# | rt-1 +-------------------+ rt-2 |
+# | | | |
+# +---+----+ +----+---+
+# | . . |
+# | fcf0:0:1:3::/64 . |
+# | . . |
+# | . . |
+# fcf0:0:1:4::/64 | . | fcf0:0:2:3::/64
+# | . . |
+# | . . |
+# | fcf0:0:2:4::/64 . |
+# | . . |
+# +---+----+ +----+---+
+# | | | |
+# | rt-4 +-------------------+ rt-3 |
+# | | fcf0:0:3:4::/64 | |
+# +---+----+ +----+---+
+#
+#
+# Every fcf0:0:x:y::/64 network interconnects the SRv6 routers rt-x with rt-y
+# in the IPv6 operator network.
+#
+# Local SID table
+# ===============
+#
+# Each SRv6 router is configured with a Local SID table in which SIDs are
+# stored. Considering the given SRv6 router rt-x, at least two SIDs are
+# configured in the Local SID table:
+#
+# Local SID table for SRv6 router rt-x
+# +----------------------------------------------------------+
+# |fcff:x::e is associated with the SRv6 End behavior |
+# |fcff:x::d2 is associated with the SRv6 End.DX2 behavior |
+# +----------------------------------------------------------+
+#
+# The fcff::/16 prefix is reserved by the operator for implementing SRv6 VPN
+# services. Reachability of SIDs is ensured by proper configuration of the
+# operator's IPv6 network and of the SRv6 routers.
+#
+# SRv6 Policies
+# =============
+#
+# An SRv6 ingress router applies SRv6 policies to the traffic received from a
+# connected host. SRv6 policy enforcement consists of encapsulating the
+# received traffic into a new IPv6 packet with a given SID List contained in
+# the SRH.
+#
+# L2 VPN between hs-1 and hs-2
+# ----------------------------
+#
+# Hosts hs-1 and hs-2 are connected using a dedicated L2 VPN.
+# Specifically, packets generated from hs-1 and directed towards hs-2 are
+# handled by rt-1 which applies the following SRv6 Policies:
+#
+# i.a) L2 traffic, SID List=fcff:2::d2
+#
+# Policy (i.a) steers tunneled L2 traffic through SRv6 router rt-2.
+# The H.L2Encaps.Red omits the SRH altogether, since the SID List consists
+# of only one SID (fcff:2::d2) that can be stored directly in the IPv6
+# DA.
+#
+# On the reverse path (i.e. from hs-2 to hs-1), rt-2 applies the following
+# policies:
+#
+# i.b) L2 traffic, SID List=fcff:4::e,fcff:3::e,fcff:1::d2
+#
+# Policy (i.b) steers tunneled L2 traffic through the SRv6 routers
+# rt-4,rt-3,rt-2. The H.L2Encaps.Red reduces the SID List in the SRH by
+# removing the first SID (fcff:4::e) and pushing it into the IPv6 DA.
+#
+# In summary:
+# hs-1->hs-2 |IPv6 DA=fcff:2::d2|eth|...| (i.a)
+# hs-2->hs-1 |IPv6 DA=fcff:4::e|SRH SIDs=fcff:3::e,fcff:1::d2|eth|...| (i.b)
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+readonly ksft_skip=4
+
+readonly RDMSUFF="$(mktemp -u XXXXXXXX)"
+readonly DUMMY_DEVNAME="dum0"
+readonly RT2HS_DEVNAME="veth-hs"
+readonly HS_VETH_NAME="veth0"
+readonly LOCALSID_TABLE_ID=90
+readonly IPv6_RT_NETWORK=fcf0:0
+readonly IPv6_HS_NETWORK=cafe
+readonly IPv4_HS_NETWORK=10.0.0
+readonly VPN_LOCATOR_SERVICE=fcff
+readonly MAC_PREFIX=00:00:00:c0:01
+readonly END_FUNC=000e
+readonly DX2_FUNC=00d2
+
+PING_TIMEOUT_SEC=4
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+# IDs of routers and hosts are initialized during the setup of the testing
+# network
+ROUTERS=''
+HOSTS=''
+
+SETUP_ERR=1
+
+ret=${ksft_skip}
+nsuccess=0
+nfail=0
+
+log_test()
+{
+ local rc="$1"
+ local expected="$2"
+ local msg="$3"
+
+ if [ "${rc}" -eq "${expected}" ]; then
+ nsuccess=$((nsuccess+1))
+ printf "\n TEST: %-60s [ OK ]\n" "${msg}"
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "\n TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+print_log_test_results()
+{
+ printf "\nTests passed: %3d\n" "${nsuccess}"
+ printf "Tests failed: %3d\n" "${nfail}"
+
+	# when a test fails, 'ret' is set to 1 (error code). Conversely,
+	# when all tests pass, 'ret' is set to 0 (success code).
+ if [ "${ret}" -ne 1 ]; then
+ ret=0
+ fi
+}
+
+log_section()
+{
+ echo
+ echo "################################################################################"
+ echo "TEST SECTION: $*"
+ echo "################################################################################"
+}
+
+test_command_or_ksft_skip()
+{
+ local cmd="$1"
+
+ if [ ! -x "$(command -v "${cmd}")" ]; then
+ echo "SKIP: Could not run test without \"${cmd}\" tool";
+ exit "${ksft_skip}"
+ fi
+}
+
+get_nodename()
+{
+ local name="$1"
+
+ echo "${name}-${RDMSUFF}"
+}
+
+get_rtname()
+{
+ local rtid="$1"
+
+ get_nodename "rt-${rtid}"
+}
+
+get_hsname()
+{
+ local hsid="$1"
+
+ get_nodename "hs-${hsid}"
+}
+
+__create_namespace()
+{
+ local name="$1"
+
+ ip netns add "${name}"
+}
+
+create_router()
+{
+ local rtid="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rtid}")"
+
+ __create_namespace "${nsname}"
+}
+
+create_host()
+{
+ local hsid="$1"
+ local nsname
+
+ nsname="$(get_hsname "${hsid}")"
+
+ __create_namespace "${nsname}"
+}
+
+cleanup()
+{
+ local nsname
+ local i
+
+ # destroy routers
+ for i in ${ROUTERS}; do
+ nsname="$(get_rtname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # destroy hosts
+ for i in ${HOSTS}; do
+ nsname="$(get_hsname "${i}")"
+
+ ip netns del "${nsname}" &>/dev/null || true
+ done
+
+ # check whether the setup phase was completed successfully or not. In
+ # case of an error during the setup phase of the testing environment,
+ # the selftest is considered as "skipped".
+ if [ "${SETUP_ERR}" -ne 0 ]; then
+ echo "SKIP: Setting up the testing environment failed"
+ exit "${ksft_skip}"
+ fi
+
+ exit "${ret}"
+}
+
+add_link_rt_pairs()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local neigh
+ local nsname
+ local neigh_nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ neigh_nsname="$(get_rtname "${neigh}")"
+
+ ip link add "veth-rt-${rt}-${neigh}" netns "${nsname}" \
+ type veth peer name "veth-rt-${neigh}-${rt}" \
+ netns "${neigh_nsname}"
+ done
+}
+
+get_network_prefix()
+{
+ local rt="$1"
+ local neigh="$2"
+ local p="${rt}"
+ local q="${neigh}"
+
+ if [ "${p}" -gt "${q}" ]; then
+ p="${q}"; q="${rt}"
+ fi
+
+ echo "${IPv6_RT_NETWORK}:${p}:${q}"
+}
+
+# Setup the basic networking for the routers
+setup_rt_networking()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local nsname
+ local net_prefix
+ local devname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ ip -netns "${nsname}" addr \
+ add "${net_prefix}::${rt}/64" dev "${devname}" nodad
+
+ ip -netns "${nsname}" link set "${devname}" up
+ done
+
+ ip -netns "${nsname}" link add "${DUMMY_DEVNAME}" type dummy
+
+ ip -netns "${nsname}" link set "${DUMMY_DEVNAME}" up
+ ip -netns "${nsname}" link set lo up
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv6.conf.all.forwarding=1
+
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec "${nsname}" sysctl -wq net.ipv4.ip_forward=1
+}
+
+# Setup local SIDs for an SRv6 router
+setup_rt_local_sids()
+{
+ local rt="$1"
+ local rt_neighs="$2"
+ local net_prefix
+ local devname
+ local nsname
+ local neigh
+
+ nsname="$(get_rtname "${rt}")"
+
+ for neigh in ${rt_neighs}; do
+ devname="veth-rt-${rt}-${neigh}"
+
+ net_prefix="$(get_network_prefix "${rt}" "${neigh}")"
+
+ # set underlay network routes for SIDs reachability
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${neigh}::/32" \
+ table "${LOCALSID_TABLE_ID}" \
+ via "${net_prefix}::${neigh}" dev "${devname}"
+ done
+
+ # Local End behavior (note that dev "${DUMMY_DEVNAME}" is a dummy
+ # interface)
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${END_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End dev "${DUMMY_DEVNAME}"
+
+	# all SIDs for VPNs start with a common locator. Routes and SRv6
+	# Endpoint behavior instances are grouped together in the 'localsid'
+	# table.
+ ip -netns "${nsname}" -6 rule add \
+ to "${VPN_LOCATOR_SERVICE}::/16" \
+ lookup "${LOCALSID_TABLE_ID}" prio 999
+}
+
+# build and install the SRv6 policy into the ingress SRv6 router.
+# args:
+# $1 - destination host (i.e. cafe::x host)
+# $2 - SRv6 router configured for enforcing the SRv6 Policy
+# $3 - SRv6 routers configured for steering traffic (End behaviors)
+# $4 - SRv6 router configured for removing the SRv6 Policy (router connected
+# to the destination host)
+# $5 - encap mode (full or red)
+# $6 - traffic type (IPv6 or IPv4)
+__setup_rt_policy()
+{
+ local dst="$1"
+ local encap_rt="$2"
+ local end_rts="$3"
+ local dec_rt="$4"
+ local mode="$5"
+ local traffic="$6"
+ local nsname
+ local policy=''
+ local n
+
+ nsname="$(get_rtname "${encap_rt}")"
+
+ for n in ${end_rts}; do
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${n}::${END_FUNC},"
+ done
+
+ policy="${policy}${VPN_LOCATOR_SERVICE}:${dec_rt}::${DX2_FUNC}"
+
+ # add SRv6 policy to incoming traffic sent by connected hosts
+ if [ "${traffic}" -eq 6 ]; then
+ ip -netns "${nsname}" -6 route \
+ add "${IPv6_HS_NETWORK}::${dst}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+			dev "${DUMMY_DEVNAME}"
+ else
+ ip -netns "${nsname}" -4 route \
+ add "${IPv4_HS_NETWORK}.${dst}" \
+ encap seg6 mode "${mode}" segs "${policy}" \
+			dev "${DUMMY_DEVNAME}"
+ fi
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv6()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 6
+}
+
+# see __setup_rt_policy
+setup_rt_policy_ipv4()
+{
+ __setup_rt_policy "$1" "$2" "$3" "$4" "$5" 4
+}
+
+setup_decap()
+{
+ local rt="$1"
+ local nsname
+
+ nsname="$(get_rtname "${rt}")"
+
+ # Local End.DX2 behavior
+ ip -netns "${nsname}" -6 route \
+ add "${VPN_LOCATOR_SERVICE}:${rt}::${DX2_FUNC}" \
+ table "${LOCALSID_TABLE_ID}" \
+ encap seg6local action End.DX2 oif "${RT2HS_DEVNAME}" \
+ dev "${RT2HS_DEVNAME}"
+}
+
+setup_hs()
+{
+ local hs="$1"
+ local rt="$2"
+ local hsname
+ local rtname
+
+ hsname="$(get_hsname "${hs}")"
+ rtname="$(get_rtname "${rt}")"
+
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.all.accept_dad=0
+ ip netns exec "${hsname}" sysctl -wq net.ipv6.conf.default.accept_dad=0
+
+ ip -netns "${hsname}" link add "${HS_VETH_NAME}" type veth \
+ peer name "${RT2HS_DEVNAME}" netns "${rtname}"
+
+ ip -netns "${hsname}" addr add "${IPv6_HS_NETWORK}::${hs}/64" \
+ dev "${HS_VETH_NAME}" nodad
+ ip -netns "${hsname}" addr add "${IPv4_HS_NETWORK}.${hs}/24" \
+ dev "${HS_VETH_NAME}"
+
+ ip -netns "${hsname}" link set "${HS_VETH_NAME}" up
+ ip -netns "${hsname}" link set lo up
+
+ ip -netns "${rtname}" addr add "${IPv6_HS_NETWORK}::254/64" \
+ dev "${RT2HS_DEVNAME}" nodad
+ ip -netns "${rtname}" addr \
+ add "${IPv4_HS_NETWORK}.254/24" dev "${RT2HS_DEVNAME}"
+
+ ip -netns "${rtname}" link set "${RT2HS_DEVNAME}" up
+
+	# disable rp_filter, otherwise the kernel gets confused about how
+	# to route decapsulated IPv4 packets.
+ ip netns exec "${rtname}" \
+ sysctl -wq net.ipv4.conf."${RT2HS_DEVNAME}".rp_filter=0
+}
+
+# set an auto-generated mac address
+# args:
+# $1 - name of the node (e.g.: hs-1, rt-3, etc)
+# $2 - id of the node (e.g.: 1 for hs-1, 3 for rt-3, etc)
+# $3 - host part of the IPv6 network address
+# $4 - name of the network interface to which the generated mac address must
+# be set.
+set_mac_address()
+{
+ local nodename="$1"
+ local nodeid="$2"
+ local host="$3"
+ local ifname="$4"
+ local nsname
+
+ nsname=$(get_nodename "${nodename}")
+
+ ip -netns "${nsname}" link set dev "${ifname}" down
+
+ ip -netns "${nsname}" link set address "${MAC_PREFIX}:${nodeid}" \
+ dev "${ifname}"
+
+ # the IPv6 address must be set once again after the MAC address has
+ # been changed.
+ ip -netns "${nsname}" addr add "${IPv6_HS_NETWORK}::${host}/64" \
+ dev "${ifname}" nodad
+
+ ip -netns "${nsname}" link set dev "${ifname}" up
+}
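+
+# For instance, 'set_mac_address "hs-1" 1 1 "${HS_VETH_NAME}"' (as invoked
+# by setup_l2vpn below) assigns the MAC address 00:00:00:c0:01:1 to veth0
+# in the hs-1 namespace and re-adds cafe::1/64 on it.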
+
+set_host_l2peer()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local ipprefix="$3"
+ local proto="$4"
+ local hssrc_name
+ local ipaddr
+
+ hssrc_name="$(get_hsname "${hssrc}")"
+
+ if [ "${proto}" -eq 6 ]; then
+ ipaddr="${ipprefix}::${hsdst}"
+ else
+ ipaddr="${ipprefix}.${hsdst}"
+ fi
+
+ ip -netns "${hssrc_name}" route add "${ipaddr}" dev "${HS_VETH_NAME}"
+
+ ip -netns "${hssrc_name}" neigh \
+ add "${ipaddr}" lladdr "${MAC_PREFIX}:${hsdst}" \
+ dev "${HS_VETH_NAME}"
+}
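+
+# For instance, 'set_host_l2peer 1 2 "${IPv6_HS_NETWORK}" 6' points cafe::2
+# at veth0 in the hs-1 namespace and installs a static neighbor entry with
+# MAC 00:00:00:c0:01:2, so no neighbor discovery is needed across the L2
+# VPN.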
+
+# setup an SRv6 L2 VPN between host hs-x and hs-y (currently, the SRv6
+# subsystem only supports L2 frames whose layer-3 is IPv4/IPv6).
+# args:
+# $1 - source host
+# $2 - SRv6 routers configured for steering tunneled traffic
+# $3 - destination host
+setup_l2vpn()
+{
+ local hssrc="$1"
+ local end_rts="$2"
+ local hsdst="$3"
+ local rtsrc="${hssrc}"
+ local rtdst="${hsdst}"
+
+	# set a fixed MAC for the source node and the neighbor MAC address
+ set_mac_address "hs-${hssrc}" "${hssrc}" "${hssrc}" "${HS_VETH_NAME}"
+ set_host_l2peer "${hssrc}" "${hsdst}" "${IPv6_HS_NETWORK}" 6
+ set_host_l2peer "${hssrc}" "${hsdst}" "${IPv4_HS_NETWORK}" 4
+
+ # we have to set the mac address of the veth-host (on ingress router)
+ # to the mac address of the remote peer (L2 VPN destination host).
+ # Otherwise, traffic coming from the source host is dropped at the
+ # ingress router.
+ set_mac_address "rt-${rtsrc}" "${hsdst}" 254 "${RT2HS_DEVNAME}"
+
+ # set the SRv6 Policies at the ingress router
+ setup_rt_policy_ipv6 "${hsdst}" "${rtsrc}" "${end_rts}" "${rtdst}" \
+ l2encap.red 6
+ setup_rt_policy_ipv4 "${hsdst}" "${rtsrc}" "${end_rts}" "${rtdst}" \
+ l2encap.red 4
+
+ # set the decap behavior
+ setup_decap "${rtsrc}"
+}
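+
+# As a worked example, 'setup_l2vpn 2 "4 3" 1' (invoked in setup() below)
+# installs in the namespace of rt-2 the IPv4/IPv6 routes towards hs-1 with
+# encap seg6 mode l2encap.red and SID List fcff:4::e,fcff:3::e,fcff:1::d2,
+# matching policy (i.b) in the header above.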
+
+setup()
+{
+ local i
+
+ # create routers
+ ROUTERS="1 2 3 4"; readonly ROUTERS
+ for i in ${ROUTERS}; do
+ create_router "${i}"
+ done
+
+ # create hosts
+ HOSTS="1 2"; readonly HOSTS
+ for i in ${HOSTS}; do
+ create_host "${i}"
+ done
+
+ # set up the links for connecting routers
+ add_link_rt_pairs 1 "2 3 4"
+ add_link_rt_pairs 2 "3 4"
+ add_link_rt_pairs 3 "4"
+
+ # set up the basic connectivity of routers and routes required for
+ # reachability of SIDs.
+ setup_rt_networking 1 "2 3 4"
+ setup_rt_networking 2 "1 3 4"
+ setup_rt_networking 3 "1 2 4"
+ setup_rt_networking 4 "1 2 3"
+
+ # set up the hosts connected to routers
+ setup_hs 1 1
+ setup_hs 2 2
+
+ # set up default SRv6 Endpoints (i.e. SRv6 End and SRv6 End.DX2)
+ setup_rt_local_sids 1 "2 3 4"
+ setup_rt_local_sids 2 "1 3 4"
+ setup_rt_local_sids 3 "1 2 4"
+ setup_rt_local_sids 4 "1 2 3"
+
+	# create an L2 VPN between hs-1 and hs-2.
+ # NB: currently, H.L2Encap* enables tunneling of L2 frames whose
+ # layer-3 is IPv4/IPv6.
+ #
+ # the network path between hs-1 and hs-2 traverses several routers
+ # depending on the direction of traffic.
+ #
+ # Direction hs-1 -> hs-2 (H.L2Encaps.Red)
+ # - rt-2 (SRv6 End.DX2 behavior)
+ #
+ # Direction hs-2 -> hs-1 (H.L2Encaps.Red)
+ # - rt-4,rt-3 (SRv6 End behaviors)
+ # - rt-1 (SRv6 End.DX2 behavior)
+ setup_l2vpn 1 "" 2
+ setup_l2vpn 2 "4 3" 1
+
+ # testing environment was set up successfully
+ SETUP_ERR=0
+}
+
+check_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+ local prefix
+ local rtsrc_nsname
+
+ rtsrc_nsname="$(get_rtname "${rtsrc}")"
+
+ prefix="$(get_network_prefix "${rtsrc}" "${rtdst}")"
+
+ ip netns exec "${rtsrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${prefix}::${rtdst}" >/dev/null 2>&1
+}
+
+check_and_log_rt_connectivity()
+{
+ local rtsrc="$1"
+ local rtdst="$2"
+
+ check_rt_connectivity "${rtsrc}" "${rtdst}"
+ log_test $? 0 "Routers connectivity: rt-${rtsrc} -> rt-${rtdst}"
+}
+
+check_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv6_HS_NETWORK}::${hsdst}" >/dev/null 2>&1
+}
+
+check_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+ local hssrc_nsname
+
+ hssrc_nsname="$(get_hsname "${hssrc}")"
+
+ ip netns exec "${hssrc_nsname}" ping -c 1 -W "${PING_TIMEOUT_SEC}" \
+ "${IPv4_HS_NETWORK}.${hsdst}" >/dev/null 2>&1
+}
+
+check_and_log_hs2gw_connectivity()
+{
+ local hssrc="$1"
+
+ check_hs_ipv6_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> gw"
+
+ check_hs_ipv4_connectivity "${hssrc}" 254
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> gw"
+}
+
+check_and_log_hs_ipv6_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv6 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_ipv4_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ log_test $? 0 "IPv4 Hosts connectivity: hs-${hssrc} -> hs-${hsdst}"
+}
+
+check_and_log_hs_connectivity()
+{
+ local hssrc="$1"
+ local hsdst="$2"
+
+ check_and_log_hs_ipv4_connectivity "${hssrc}" "${hsdst}"
+ check_and_log_hs_ipv6_connectivity "${hssrc}" "${hsdst}"
+}
+
+router_tests()
+{
+ local i
+ local j
+
+ log_section "IPv6 routers connectivity test"
+
+ for i in ${ROUTERS}; do
+ for j in ${ROUTERS}; do
+ if [ "${i}" -eq "${j}" ]; then
+ continue
+ fi
+
+ check_and_log_rt_connectivity "${i}" "${j}"
+ done
+ done
+}
+
+host2gateway_tests()
+{
+ local hs
+
+ log_section "IPv4/IPv6 connectivity test among hosts and gateways"
+
+ for hs in ${HOSTS}; do
+ check_and_log_hs2gw_connectivity "${hs}"
+ done
+}
+
+host_vpn_tests()
+{
+	log_section "SRv6 L2 VPN connectivity test among hosts (h1 <-> h2)"
+
+ check_and_log_hs_connectivity 1 2
+ check_and_log_hs_connectivity 2 1
+}
+
+test_dummy_dev_or_ksft_skip()
+{
+ local test_netns
+
+ test_netns="dummy-$(mktemp -u XXXXXXXX)"
+
+ if ! ip netns add "${test_netns}"; then
+ echo "SKIP: Cannot set up netns for testing dummy dev support"
+ exit "${ksft_skip}"
+ fi
+
+ modprobe dummy &>/dev/null || true
+ if ! ip -netns "${test_netns}" link \
+ add "${DUMMY_DEVNAME}" type dummy; then
+ echo "SKIP: dummy dev not supported"
+
+ ip netns del "${test_netns}"
+ exit "${ksft_skip}"
+ fi
+
+ ip netns del "${test_netns}"
+}
+
+test_iproute2_supp_or_ksft_skip()
+{
+ if ! ip route help 2>&1 | grep -qo "l2encap.red"; then
+ echo "SKIP: Missing SRv6 l2encap.red support in iproute2"
+ exit "${ksft_skip}"
+ fi
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+ echo "SKIP: Need root privileges"
+ exit "${ksft_skip}"
+fi
+
+# required programs to carry out this selftest
+test_command_or_ksft_skip ip
+test_command_or_ksft_skip ping
+test_command_or_ksft_skip sysctl
+test_command_or_ksft_skip grep
+
+test_iproute2_supp_or_ksft_skip
+test_dummy_dev_or_ksft_skip
+
+set -e
+trap cleanup EXIT
+
+setup
+set +e
+
+router_tests
+host2gateway_tests
+host_vpn_tests
+
+print_log_test_results
diff --git a/tools/testing/selftests/net/stress_reuseport_listen.c b/tools/testing/selftests/net/stress_reuseport_listen.c
new file mode 100644
index 000000000000..ef800bb35a8e
--- /dev/null
+++ b/tools/testing/selftests/net/stress_reuseport_listen.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+/* Test listening on the same port 443 with multiple VIPs.
+ * Each VIP:443 will have multiple sockets listening on it
+ * by using SO_REUSEPORT.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <error.h>
+#include <errno.h>
+#include <time.h>
+#include <arpa/inet.h>
+
+#define IP6_LADDR_START "2401:dead::1"
+#define IP6_LPORT 443
+#define NSEC_PER_SEC 1000000000L
+#define NSEC_PER_USEC 1000L
+
+static unsigned int nr_socks_per_vip;
+static unsigned int nr_vips;
+
+static int *bind_reuseport_sock6(void)
+{
+ int *lfds, *cur_fd, err, optvalue = 1;
+ struct sockaddr_in6 sa6 = {};
+ unsigned int i, j;
+
+ sa6.sin6_family = AF_INET6;
+ sa6.sin6_port = htons(IP6_LPORT);
+ err = inet_pton(AF_INET6, IP6_LADDR_START, &sa6.sin6_addr);
+ if (err != 1)
+ error(1, err, "inet_pton(%s)", IP6_LADDR_START);
+
+ lfds = malloc(nr_vips * nr_socks_per_vip * sizeof(lfds[0]));
+ if (!lfds)
+ error(1, errno, "cannot alloc array of lfds");
+
+ cur_fd = lfds;
+ for (i = 0; i < nr_vips; i++) {
+ for (j = 0; j < nr_socks_per_vip; j++) {
+ *cur_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (*cur_fd == -1)
+ error(1, errno,
+ "lfds[%u,%u] = socket(AF_INET6)", i, j);
+
+ err = setsockopt(*cur_fd, SOL_SOCKET, SO_REUSEPORT,
+ &optvalue, sizeof(optvalue));
+ if (err)
+ error(1, errno,
+ "setsockopt(lfds[%u,%u], SO_REUSEPORT)",
+ i, j);
+
+ err = bind(*cur_fd, (struct sockaddr *)&sa6,
+ sizeof(sa6));
+ if (err)
+ error(1, errno, "bind(lfds[%u,%u])", i, j);
+ cur_fd++;
+ }
+ sa6.sin6_addr.s6_addr32[3]++;
+ }
+
+ return lfds;
+}
+
+int main(int argc, const char *argv[])
+{
+ struct timespec start_ts, end_ts;
+ unsigned long start_ns, end_ns;
+ unsigned int nr_lsocks;
+ int *lfds, i, err;
+
+ if (argc != 3 || atoi(argv[1]) <= 0 || atoi(argv[2]) <= 0)
+ error(1, 0, "Usage: %s <nr_vips> <nr_socks_per_vip>\n",
+ argv[0]);
+
+ nr_vips = atoi(argv[1]);
+ nr_socks_per_vip = atoi(argv[2]);
+ nr_lsocks = nr_vips * nr_socks_per_vip;
+ lfds = bind_reuseport_sock6();
+
+ clock_gettime(CLOCK_MONOTONIC, &start_ts);
+ for (i = 0; i < nr_lsocks; i++) {
+ err = listen(lfds[i], 0);
+ if (err)
+ error(1, errno, "listen(lfds[%d])", i);
+ }
+ clock_gettime(CLOCK_MONOTONIC, &end_ts);
+
+ start_ns = start_ts.tv_sec * NSEC_PER_SEC + start_ts.tv_nsec;
+ end_ns = end_ts.tv_sec * NSEC_PER_SEC + end_ts.tv_nsec;
+
+	printf("listen %u socks took %lu.%06lu\n", nr_lsocks,
+	       (end_ns - start_ns) / NSEC_PER_SEC,
+	       ((end_ns - start_ns) % NSEC_PER_SEC) / NSEC_PER_USEC);
+
+ for (i = 0; i < nr_lsocks; i++)
+ close(lfds[i]);
+
+ free(lfds);
+ return 0;
+}
diff --git a/tools/testing/selftests/net/stress_reuseport_listen.sh b/tools/testing/selftests/net/stress_reuseport_listen.sh
new file mode 100755
index 000000000000..4de11da4092b
--- /dev/null
+++ b/tools/testing/selftests/net/stress_reuseport_listen.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+
+NS='stress_reuseport_listen_ns'
+NR_FILES=24100
+SAVED_NR_FILES=$(ulimit -n)
+
+setup() {
+ ip netns add $NS
+ ip netns exec $NS sysctl -q -w net.ipv6.ip_nonlocal_bind=1
+ ulimit -n $NR_FILES
+}
+
+cleanup() {
+ ip netns del $NS
+ ulimit -n $SAVED_NR_FILES
+}
+
+trap cleanup EXIT
+setup
+# 300 different VIPs listen on port 443.
+# Each VIP:443 sockaddr has 80 listening socks by using SO_REUSEPORT,
+# i.e. 24000 listening socks in total.
+ip netns exec $NS ./stress_reuseport_listen 300 80
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 5d70b04c482c..2cbb12736596 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -235,6 +235,7 @@ FIXTURE_VARIANT(tls)
{
uint16_t tls_version;
uint16_t cipher_type;
+ bool nopad;
};
FIXTURE_VARIANT_ADD(tls, 12_aes_gcm)
@@ -297,9 +298,17 @@ FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256)
.cipher_type = TLS_CIPHER_AES_GCM_256,
};
+FIXTURE_VARIANT_ADD(tls, 13_nopad)
+{
+ .tls_version = TLS_1_3_VERSION,
+ .cipher_type = TLS_CIPHER_AES_GCM_128,
+ .nopad = true,
+};
+
FIXTURE_SETUP(tls)
{
struct tls_crypto_info_keys tls12;
+ int one = 1;
int ret;
tls_crypto_info_init(variant->tls_version, variant->cipher_type,
@@ -315,6 +324,12 @@ FIXTURE_SETUP(tls)
ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
ASSERT_EQ(ret, 0);
+
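+	/* TLS_RX_EXPECT_NO_PAD lets the receive path assume that TLS 1.3
+	 * records arrive without padding; the 13_nopad variant re-runs the
+	 * test suite with this option set on the receiving socket.
+	 */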
+ if (variant->nopad) {
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
+ (void *)&one, sizeof(one));
+ ASSERT_EQ(ret, 0);
+ }
}
FIXTURE_TEARDOWN(tls)
@@ -629,12 +644,14 @@ TEST_F(tls, splice_from_pipe2)
int p2[2];
int p[2];
+ memrnd(mem_send, sizeof(mem_send));
+
ASSERT_GE(pipe(p), 0);
ASSERT_GE(pipe(p2), 0);
- EXPECT_GE(write(p[1], mem_send, 8000), 0);
- EXPECT_GE(splice(p[0], NULL, self->fd, NULL, 8000, 0), 0);
- EXPECT_GE(write(p2[1], mem_send + 8000, 8000), 0);
- EXPECT_GE(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 0);
+ EXPECT_EQ(write(p[1], mem_send, 8000), 8000);
+ EXPECT_EQ(splice(p[0], NULL, self->fd, NULL, 8000, 0), 8000);
+ EXPECT_EQ(write(p2[1], mem_send + 8000, 8000), 8000);
+ EXPECT_EQ(splice(p2[0], NULL, self->fd, NULL, 8000, 0), 8000);
EXPECT_EQ(recv(self->cfd, mem_recv, send_len, MSG_WAITALL), send_len);
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
@@ -668,10 +685,12 @@ TEST_F(tls, splice_to_pipe)
char mem_recv[TLS_PAYLOAD_MAX_LEN];
int p[2];
+ memrnd(mem_send, sizeof(mem_send));
+
ASSERT_GE(pipe(p), 0);
- EXPECT_GE(send(self->fd, mem_send, send_len, 0), 0);
- EXPECT_GE(splice(self->cfd, NULL, p[1], NULL, send_len, 0), 0);
- EXPECT_GE(read(p[0], mem_recv, send_len), 0);
+ EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), send_len);
+ EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
@@ -860,6 +879,8 @@ TEST_F(tls, multiple_send_single_recv)
char recv_mem[2 * 10];
char send_mem[10];
+ memrnd(send_mem, sizeof(send_mem));
+
EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
EXPECT_GE(send(self->fd, send_mem, send_len, 0), 0);
memset(recv_mem, 0, total_len);
@@ -876,6 +897,8 @@ TEST_F(tls, single_send_multiple_recv_non_align)
char recv_mem[recv_len * 2];
char send_mem[total_len];
+ memrnd(send_mem, sizeof(send_mem));
+
EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0);
memset(recv_mem, 0, total_len);
@@ -921,10 +944,10 @@ TEST_F(tls, recv_peek)
char buf[15];
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, buf, send_len, MSG_PEEK), -1);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_PEEK), send_len);
EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
memset(buf, 0, sizeof(buf));
- EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), send_len);
EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}
@@ -1582,6 +1605,38 @@ TEST_F(tls_err, bad_cmsg)
EXPECT_EQ(errno, EBADMSG);
}
+TEST_F(tls_err, timeo)
+{
+ struct timeval tv = { .tv_usec = 10000, };
+ char buf[128];
+ int ret;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ ret = setsockopt(self->cfd2, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+ ASSERT_EQ(ret, 0);
+
+ ret = fork();
+ ASSERT_GE(ret, 0);
+
+ if (ret) {
+ usleep(1000); /* Give child a head start */
+
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EAGAIN);
+
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EAGAIN);
+
+ wait(&ret);
+ } else {
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EAGAIN);
+ exit(0);
+ }
+}
+
TEST(non_established) {
struct tls12_crypto_info_aes_gcm_256 tls12;
struct sockaddr_in addr;
@@ -1659,6 +1714,57 @@ TEST(keysizes) {
close(cfd);
}
+TEST(no_pad) {
+ struct tls12_crypto_info_aes_gcm_256 tls12;
+ int ret, fd, cfd, val;
+ socklen_t len;
+ bool notls;
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_3_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
+
+ ulp_sock_pair(_metadata, &fd, &cfd, &notls);
+
+ if (notls)
+ exit(KSFT_SKIP);
+
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+
+ val = 1;
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
+ (void *)&val, sizeof(val));
+ EXPECT_EQ(ret, 0);
+
+ len = sizeof(val);
+ val = 2;
+ ret = getsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
+ (void *)&val, &len);
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(val, 1);
+ EXPECT_EQ(len, 4);
+
+ val = 0;
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
+ (void *)&val, sizeof(val));
+ EXPECT_EQ(ret, 0);
+
+ len = sizeof(val);
+ val = 2;
+ ret = getsockopt(cfd, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
+ (void *)&val, &len);
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(val, 0);
+ EXPECT_EQ(len, 4);
+
+ close(fd);
+ close(cfd);
+}
+
TEST(tls_v6ops) {
struct tls_crypto_info_keys tls12;
struct sockaddr_in6 addr, addr2;
diff --git a/tools/testing/selftests/net/tun.c b/tools/testing/selftests/net/tun.c
new file mode 100644
index 000000000000..fa83918b62d1
--- /dev/null
+++ b/tools/testing/selftests/net/tun.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+
+#include "../kselftest_harness.h"
+
+static int tun_attach(int fd, char *dev)
+{
+ struct ifreq ifr;
+
+ memset(&ifr, 0, sizeof(ifr));
+ strcpy(ifr.ifr_name, dev);
+ ifr.ifr_flags = IFF_ATTACH_QUEUE;
+
+ return ioctl(fd, TUNSETQUEUE, (void *) &ifr);
+}
+
+static int tun_detach(int fd, char *dev)
+{
+ struct ifreq ifr;
+
+ memset(&ifr, 0, sizeof(ifr));
+ strcpy(ifr.ifr_name, dev);
+ ifr.ifr_flags = IFF_DETACH_QUEUE;
+
+ return ioctl(fd, TUNSETQUEUE, (void *) &ifr);
+}
+
+static int tun_alloc(char *dev)
+{
+ struct ifreq ifr;
+ int fd, err;
+
+ fd = open("/dev/net/tun", O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "can't open tun: %s\n", strerror(errno));
+ return fd;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+ strcpy(ifr.ifr_name, dev);
+ ifr.ifr_flags = IFF_TAP | IFF_NAPI | IFF_MULTI_QUEUE;
+
+ err = ioctl(fd, TUNSETIFF, (void *) &ifr);
+ if (err < 0) {
+ fprintf(stderr, "can't TUNSETIFF: %s\n", strerror(errno));
+ close(fd);
+ return err;
+ }
+ strcpy(dev, ifr.ifr_name);
+ return fd;
+}
+
+static int tun_delete(char *dev)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg ifm;
+ unsigned char data[64];
+ } req;
+ struct rtattr *rta;
+ int ret, rtnl;
+
+ rtnl = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
+ if (rtnl < 0) {
+ fprintf(stderr, "can't open rtnl: %s\n", strerror(errno));
+ return 1;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_ALIGN(NLMSG_LENGTH(sizeof(req.ifm)));
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = RTM_DELLINK;
+
+ req.ifm.ifi_family = AF_UNSPEC;
+
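+	/* append an IFLA_IFNAME attribute naming the device to delete */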
+ rta = (struct rtattr *)(((char *)&req) + NLMSG_ALIGN(req.nh.nlmsg_len));
+ rta->rta_type = IFLA_IFNAME;
+ rta->rta_len = RTA_LENGTH(IFNAMSIZ);
+ req.nh.nlmsg_len += rta->rta_len;
+ memcpy(RTA_DATA(rta), dev, IFNAMSIZ);
+
+ ret = send(rtnl, &req, req.nh.nlmsg_len, 0);
+ if (ret < 0)
+ fprintf(stderr, "can't send: %s\n", strerror(errno));
+ ret = (unsigned int)ret != req.nh.nlmsg_len;
+
+ close(rtnl);
+ return ret;
+}
+
+FIXTURE(tun)
+{
+ char ifname[IFNAMSIZ];
+ int fd, fd2;
+};
+
+FIXTURE_SETUP(tun)
+{
+ memset(self->ifname, 0, sizeof(self->ifname));
+
+ self->fd = tun_alloc(self->ifname);
+ ASSERT_GE(self->fd, 0);
+
+ self->fd2 = tun_alloc(self->ifname);
+ ASSERT_GE(self->fd2, 0);
+}
+
+FIXTURE_TEARDOWN(tun)
+{
+ if (self->fd >= 0)
+ close(self->fd);
+ if (self->fd2 >= 0)
+ close(self->fd2);
+}
+
+TEST_F(tun, delete_detach_close) {
+ EXPECT_EQ(tun_delete(self->ifname), 0);
+ EXPECT_EQ(tun_detach(self->fd, self->ifname), -1);
+	EXPECT_EQ(errno, EINVAL);
+}
+
+TEST_F(tun, detach_delete_close) {
+ EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+ EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_F(tun, detach_close_delete) {
+ EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+ close(self->fd);
+ self->fd = -1;
+ EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_F(tun, reattach_delete_close) {
+ EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+ EXPECT_EQ(tun_attach(self->fd, self->ifname), 0);
+ EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_F(tun, reattach_close_delete) {
+ EXPECT_EQ(tun_detach(self->fd, self->ifname), 0);
+ EXPECT_EQ(tun_attach(self->fd, self->ifname), 0);
+ close(self->fd);
+ self->fd = -1;
+ EXPECT_EQ(tun_delete(self->ifname), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index f8a19f548ae9..ebbd0b282432 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -34,7 +34,7 @@ cfg_veth() {
ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
ip -netns "${PEER_NS}" link set dev veth1 up
- ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+ ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
}
run_one() {
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
index 820bc50f6b68..fad2d1a71cac 100755
--- a/tools/testing/selftests/net/udpgro_bench.sh
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -34,7 +34,7 @@ run_one() {
ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
ip -netns "${PEER_NS}" link set dev veth1 up
- ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+ ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
diff --git a/tools/testing/selftests/net/udpgro_frglist.sh b/tools/testing/selftests/net/udpgro_frglist.sh
index 807b74c8fd80..832c738cc3c2 100755
--- a/tools/testing/selftests/net/udpgro_frglist.sh
+++ b/tools/testing/selftests/net/udpgro_frglist.sh
@@ -36,7 +36,7 @@ run_one() {
ip netns exec "${PEER_NS}" ethtool -K veth1 rx-gro-list on
- ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+ ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp
tc -n "${PEER_NS}" qdisc add dev veth1 clsact
tc -n "${PEER_NS}" filter add dev veth1 ingress prio 4 protocol ipv6 bpf object-file ../bpf/nat6to4.o section schedcls/ingress6/nat_6 direct-action
tc -n "${PEER_NS}" filter add dev veth1 egress prio 4 protocol ip bpf object-file ../bpf/nat6to4.o section schedcls/egress4/snat4 direct-action
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index 6f05e06f6761..1bcd82e1f662 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -46,7 +46,7 @@ create_ns() {
ip -n $BASE$ns addr add dev veth$ns $BM_NET_V4$ns/24
ip -n $BASE$ns addr add dev veth$ns $BM_NET_V6$ns/64 nodad
done
- ip -n $NS_DST link set veth$DST xdp object ../bpf/xdp_dummy.o section xdp_dummy 2>/dev/null
+ ip -n $NS_DST link set veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
}
create_vxlan_endpoint() {
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 80b5d352702e..dc932fd65363 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -120,7 +120,7 @@ run_all() {
run_udp "${ipv4_args}"
echo "ipv6"
- run_tcp "${ipv4_args}"
+ run_tcp "${ipv6_args}"
run_udp "${ipv6_args}"
}
diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh
index 19eac3e44c06..430895d1a2b6 100755
--- a/tools/testing/selftests/net/veth.sh
+++ b/tools/testing/selftests/net/veth.sh
@@ -289,14 +289,14 @@ if [ $CPUS -gt 1 ]; then
ip netns exec $NS_SRC ethtool -L veth$SRC rx 1 tx 2 2>/dev/null
printf "%-60s" "bad setting: XDP with RX nr less than TX"
ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
- section xdp_dummy 2>/dev/null &&\
+ section xdp 2>/dev/null &&\
echo "fail - set operation successful ?!?" || echo " ok "
# the following tests will run with multiple channels active
ip netns exec $NS_SRC ethtool -L veth$SRC rx 2
ip netns exec $NS_DST ethtool -L veth$DST rx 2
ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o \
- section xdp_dummy 2>/dev/null
+ section xdp 2>/dev/null
printf "%-60s" "bad setting: reducing RX nr below peer TX with XDP set"
ip netns exec $NS_DST ethtool -L veth$DST rx 1 2>/dev/null &&\
echo "fail - set operation successful ?!?" || echo " ok "
@@ -311,7 +311,7 @@ if [ $CPUS -gt 2 ]; then
chk_channels "setting invalid channels nr" $DST 2 2
fi
-ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp_dummy 2>/dev/null
+ip -n $NS_DST link set dev veth$DST xdp object ../bpf/xdp_dummy.o section xdp 2>/dev/null
chk_gro_flag "with xdp attached - gro flag" $DST on
chk_gro_flag " - peer gro flag" $SRC off
chk_tso_flag " - tso flag" $SRC off
diff --git a/tools/testing/selftests/net/vrf_strict_mode_test.sh b/tools/testing/selftests/net/vrf_strict_mode_test.sh
index 865d53c1781c..417d214264f3 100755
--- a/tools/testing/selftests/net/vrf_strict_mode_test.sh
+++ b/tools/testing/selftests/net/vrf_strict_mode_test.sh
@@ -14,6 +14,8 @@ INIT_NETNS_NAME="init"
PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+TESTS="init testns mix"
+
log_test()
{
local rc=$1
@@ -262,6 +264,8 @@ cleanup()
vrf_strict_mode_tests_init()
{
+ log_section "VRF strict_mode test on init network namespace"
+
vrf_strict_mode_check_support init
strict_mode_check_default init
@@ -292,6 +296,8 @@ vrf_strict_mode_tests_init()
vrf_strict_mode_tests_testns()
{
+ log_section "VRF strict_mode test on testns network namespace"
+
vrf_strict_mode_check_support testns
strict_mode_check_default testns
@@ -318,6 +324,8 @@ vrf_strict_mode_tests_testns()
vrf_strict_mode_tests_mix()
{
+ log_section "VRF strict_mode test mixing init and testns network namespaces"
+
read_strict_mode_compare_and_check init 1
read_strict_mode_compare_and_check testns 0
@@ -341,18 +349,30 @@ vrf_strict_mode_tests_mix()
read_strict_mode_compare_and_check testns 0
}
-vrf_strict_mode_tests()
-{
- log_section "VRF strict_mode test on init network namespace"
- vrf_strict_mode_tests_init
+################################################################################
+# usage
- log_section "VRF strict_mode test on testns network namespace"
- vrf_strict_mode_tests_testns
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
- log_section "VRF strict_mode test mixing init and testns network namespaces"
- vrf_strict_mode_tests_mix
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+EOF
}
+################################################################################
+# main
+
+while getopts ":t:h" opt; do
+ case $opt in
+ t) TESTS=$OPTARG;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
vrf_strict_mode_check_support()
{
local nsname=$1
@@ -391,7 +411,17 @@ fi
cleanup &> /dev/null
setup
-vrf_strict_mode_tests
+for t in $TESTS
+do
+ case $t in
+ vrf_strict_mode_tests_init|init) vrf_strict_mode_tests_init;;
+ vrf_strict_mode_tests_testns|testns) vrf_strict_mode_tests_testns;;
+ vrf_strict_mode_tests_mix|mix) vrf_strict_mode_tests_mix;;
+
+ help) echo "Test names: $TESTS"; exit 0;;
+
+ esac
+done
cleanup
print_log_test_results
diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
index b35010cc7f6a..a6991877e50c 100755
--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
@@ -31,7 +31,7 @@ BUGS="flush_remove_add reload"
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
- ../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
+ ../../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
pktgen/pktgen_bench_xmit_mode_netif_receive.sh"
# Definition of set types:
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
index 695a1958723f..fd76b69635a4 100755
--- a/tools/testing/selftests/netfilter/nft_fib.sh
+++ b/tools/testing/selftests/netfilter/nft_fib.sh
@@ -66,6 +66,20 @@ table inet filter {
EOF
}
+load_pbr_ruleset() {
+ local netns=$1
+
+ip netns exec ${netns} nft -f /dev/stdin <<EOF
+table inet filter {
+ chain forward {
+ type filter hook forward priority raw;
+ fib saddr . iif oif gt 0 accept
+ log drop
+ }
+}
+EOF
+}
+
load_ruleset_count() {
local netns=$1
@@ -219,4 +233,40 @@ sleep 2
ip netns exec ${ns1} ping -c 3 -q 1c3::c01d > /dev/null
check_fib_counter 3 ${nsrouter} 1c3::c01d || exit 1
+# delete all rules
+ip netns exec ${ns1} nft flush ruleset
+ip netns exec ${ns2} nft flush ruleset
+ip netns exec ${nsrouter} nft flush ruleset
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+
+ip -net ${ns1} addr del 10.0.2.99/24 dev eth0
+ip -net ${ns1} addr del dead:2::99/64 dev eth0
+
+ip -net ${nsrouter} addr del dead:2::1/64 dev veth0
+
+# ... pbr ruleset for the router, check iif+oif.
+load_pbr_ruleset ${nsrouter}
+if [ $? -ne 0 ] ; then
+ echo "SKIP: Could not load fib forward ruleset"
+ exit $ksft_skip
+fi
+
+ip -net ${nsrouter} rule add from all table 128
+ip -net ${nsrouter} rule add from all iif veth0 table 129
+ip -net ${nsrouter} route add table 128 to 10.0.1.0/24 dev veth0
+ip -net ${nsrouter} route add table 129 to 10.0.2.0/24 dev veth1
+
+# drop main ipv4 table
+ip -net ${nsrouter} -4 rule delete table main
+
+test_ping 10.0.2.99 dead:2::99
+if [ $? -ne 0 ] ; then
+ ip -net ${nsrouter} nft list ruleset
+ echo "FAIL: fib mismatch in pbr setup"
+ exit 1
+fi
+
+echo "PASS: fib expression forward check with policy based routing"
exit 0
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index eb8543b9a5c4..924ecb3f1f73 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -374,6 +374,45 @@ EOF
return $lret
}
+test_local_dnat_portonly()
+{
+ local family=$1
+ local daddr=$2
+ local lret=0
+ local sc_s
+ local result
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family nat {
+ chain output {
+ type nat hook output priority 0; policy accept;
+ meta l4proto tcp dnat to :2000
+
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ if [ $family = "inet" ];then
+ echo "SKIP: inet port test"
+ test_inet_nat=false
+ return
+ fi
+ echo "SKIP: Could not add $family dnat hook"
+ return
+ fi
+
+ echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
+ sc_s=$!
+
+ result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
+
+ if [ "$result" = "SERVER-inet" ];then
+ echo "PASS: inet port rewrite without l3 address"
+ else
+ echo "ERROR: inet port rewrite"
+ ret=1
+ fi
+}
test_masquerade6()
{
@@ -1148,6 +1187,10 @@ fi
reset_counters
test_local_dnat ip
test_local_dnat6 ip6
+
+reset_counters
+test_local_dnat_portonly inet 10.0.1.99
+
reset_counters
$test_inet_nat && test_local_dnat inet
$test_inet_nat && test_local_dnat6 inet
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index b7d188fc87c7..b9fa9cd709df 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -135,6 +135,11 @@ do { \
#define PPC_FEATURE2_ARCH_3_1 0x00040000
#endif
+/* POWER10 features */
+#ifndef PPC_FEATURE2_MMA
+#define PPC_FEATURE2_MMA 0x00020000
+#endif
+
#if defined(__powerpc64__)
#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.gp_regs[PT_MSR]
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index fcc91c205984..3948f7c510aa 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt
+TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt mma
top_srcdir = ../../../../..
include ../../lib.mk
@@ -17,3 +17,5 @@ $(OUTPUT)/vmx_signal: vmx_asm.S ../utils.c
$(OUTPUT)/vsx_preempt: CFLAGS += -mvsx
$(OUTPUT)/vsx_preempt: vsx_asm.S ../utils.c
+
+$(OUTPUT)/mma: mma.c mma.S ../utils.c
diff --git a/tools/testing/selftests/powerpc/math/mma.S b/tools/testing/selftests/powerpc/math/mma.S
new file mode 100644
index 000000000000..8528c9849565
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/mma.S
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+ .global test_mma
+test_mma:
+ /* Load accumulator via VSX registers from image passed in r3 */
+ lxvh8x 4,0,3
+ lxvh8x 5,0,4
+
+ /* Clear and prime the accumulator (xxsetaccz) */
+ .long 0x7c030162
+
+ /* Prime the accumulator with MMA VSX move to accumulator
+ * X-form (xxmtacc) (not needed due to above zeroing) */
+ //.long 0x7c010162
+
+ /* xvi16ger2s */
+ .long 0xec042958
+
+ /* Store result in image passed in r5 */
+ stxvw4x 0,0,5
+ addi 5,5,16
+ stxvw4x 1,0,5
+ addi 5,5,16
+ stxvw4x 2,0,5
+ addi 5,5,16
+ stxvw4x 3,0,5
+ addi 5,5,16
+
+ blr
diff --git a/tools/testing/selftests/powerpc/math/mma.c b/tools/testing/selftests/powerpc/math/mma.c
new file mode 100644
index 000000000000..3a71808c993f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/math/mma.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+#include <stdio.h>
+#include <stdint.h>
+
+#include "utils.h"
+
+extern void test_mma(uint16_t (*)[8], uint16_t (*)[8], uint32_t (*)[4*4]);
+
+static int mma(void)
+{
+ int i;
+ int rc = 0;
+ uint16_t x[] = {1, 0, 2, 0, 3, 0, 4, 0};
+ uint16_t y[] = {1, 0, 2, 0, 3, 0, 4, 0};
+ uint32_t z[4*4];
+ uint32_t exp[4*4] = {1, 2, 3, 4,
+ 2, 4, 6, 8,
+ 3, 6, 9, 12,
+ 4, 8, 12, 16};
+
+ SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_ARCH_3_1), "Need ISAv3.1");
+ SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_MMA), "Need MMA");
+
+ test_mma(&x, &y, &z);
+
+ for (i = 0; i < 16; i++) {
+ printf("MMA[%d] = %d ", i, z[i]);
+
+ if (z[i] == exp[i]) {
+ printf(" (Correct)\n");
+ } else {
+ printf(" (Incorrect)\n");
+ rc = 1;
+ }
+ }
+
+ return rc;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(mma, "mma");
+}
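
Aside: the expected values in exp[] are simply the 4x4 outer product of the
leading halfword in each pair of x and y, since xvi16ger2s sums products of
16-bit element pairs and the odd elements above are zero. A minimal host-side
reference model (hypothetical helper, not part of the test):

    #include <stdint.h>

    /* z[i][j] = x[2i]*y[2j] + x[2i+1]*y[2j+1]; with the zero padding used
     * in the test this collapses to (i + 1) * (j + 1). */
    static void mma_reference(const uint16_t x[8], const uint16_t y[8],
                              uint32_t z[4][4])
    {
            for (int i = 0; i < 4; i++)
                    for (int j = 0; j < 4; j++)
                            z[i][j] = (uint32_t)x[2 * i] * y[2 * j] +
                                      (uint32_t)x[2 * i + 1] * y[2 * j + 1];
    }
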
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index aac4a59f9e28..4e1a294eec35 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -12,3 +12,4 @@ pkey_exec_prot
pkey_siginfo
stack_expansion_ldst
stack_expansion_signal
+large_vm_gpr_corruption
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 40253abc6208..27dc09d0bfee 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -4,7 +4,8 @@ noarg:
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
large_vm_fork_separation bad_accesses pkey_exec_prot \
- pkey_siginfo stack_expansion_signal stack_expansion_ldst
+ pkey_siginfo stack_expansion_signal stack_expansion_ldst \
+ large_vm_gpr_corruption
TEST_PROGS := stress_code_patching.sh
TEST_GEN_PROGS_EXTENDED := tlbie_test
@@ -19,6 +20,7 @@ $(OUTPUT)/prot_sao: ../utils.c
$(OUTPUT)/wild_bctr: CFLAGS += -m64
$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+$(OUTPUT)/large_vm_gpr_corruption: CFLAGS += -m64
$(OUTPUT)/bad_accesses: CFLAGS += -m64
$(OUTPUT)/pkey_exec_prot: CFLAGS += -m64
$(OUTPUT)/pkey_siginfo: CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c
new file mode 100644
index 000000000000..927bfae99ed9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2022, Michael Ellerman, IBM Corp.
+//
+// Test that the 4PB address space SLB handling doesn't corrupt userspace registers
+// (r9-r13) due to an SLB fault while saving the PPR.
+//
+// The bug was introduced in f384796c4 ("powerpc/mm: Add support for handling > 512TB
+// address in SLB miss") and fixed in 4c2de74cc869 ("powerpc/64: Interrupts save PPR on
+// stack rather than thread_struct").
+//
+// To hit the bug requires the task struct and kernel stack to be in different segments.
+// Usually that requires more than 1TB of RAM, or if that's not practical, boot the kernel
+// with "disable_1tb_segments".
+//
+// The test works by creating mappings above 512TB, to trigger the large address space
+// support. It creates 64 mappings, double the size of the SLB, to cause SLB faults on
+// each access (assuming naive replacement). It then loops over those mappings touching
+// each, and checks that r9-r13 aren't corrupted.
+//
+// It then forks another child and tries again, because a new child process will get a new
+// kernel stack and thread struct allocated, which may be more optimally placed to trigger
+// the bug. It would probably be better to leave the previous child processes hanging
+// around, so that kernel stack & thread struct allocations are not reused, but that would
+// amount to a 30 second fork bomb. The current design reliably triggers the bug on
+// unpatched kernels.
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB
+#endif
+
+#define BASE_ADDRESS (1ul << 50) // 1PB
+#define STRIDE (2ul << 40) // 2TB
+#define SLB_SIZE 32
+#define NR_MAPPINGS (SLB_SIZE * 2)
+
+static volatile sig_atomic_t signaled;
+
+static void signal_handler(int sig)
+{
+ signaled = 1;
+}
+
+#define CHECK_REG(_reg) \
+ if (_reg != _reg##_orig) { \
+ printf(str(_reg) " corrupted! Expected 0x%lx != 0x%lx\n", _reg##_orig, \
+ _reg); \
+ _exit(1); \
+ }
+
+static int touch_mappings(void)
+{
+ unsigned long r9_orig, r10_orig, r11_orig, r12_orig, r13_orig;
+ unsigned long r9, r10, r11, r12, r13;
+ unsigned long addr, *p;
+ int i;
+
+ for (i = 0; i < NR_MAPPINGS; i++) {
+ addr = BASE_ADDRESS + (i * STRIDE);
+ p = (unsigned long *)addr;
+
+ asm volatile("mr %0, %%r9 ;" // Read original GPR values
+ "mr %1, %%r10 ;"
+ "mr %2, %%r11 ;"
+ "mr %3, %%r12 ;"
+ "mr %4, %%r13 ;"
+ "std %10, 0(%11) ;" // Trigger SLB fault
+ "mr %5, %%r9 ;" // Save possibly corrupted values
+ "mr %6, %%r10 ;"
+ "mr %7, %%r11 ;"
+ "mr %8, %%r12 ;"
+ "mr %9, %%r13 ;"
+ "mr %%r9, %0 ;" // Restore original values
+ "mr %%r10, %1 ;"
+ "mr %%r11, %2 ;"
+ "mr %%r12, %3 ;"
+ "mr %%r13, %4 ;"
+ : "=&b"(r9_orig), "=&b"(r10_orig), "=&b"(r11_orig),
+ "=&b"(r12_orig), "=&b"(r13_orig), "=&b"(r9), "=&b"(r10),
+ "=&b"(r11), "=&b"(r12), "=&b"(r13)
+ : "b"(i), "b"(p)
+ : "r9", "r10", "r11", "r12", "r13");
+
+ CHECK_REG(r9);
+ CHECK_REG(r10);
+ CHECK_REG(r11);
+ CHECK_REG(r12);
+ CHECK_REG(r13);
+ }
+
+ return 0;
+}
+
+static int test(void)
+{
+ unsigned long page_size, addr, *p;
+ struct sigaction action;
+ bool hash_mmu;
+ int i, status;
+ pid_t pid;
+
+ // This tests a hash MMU specific bug.
+ FAIL_IF(using_hash_mmu(&hash_mmu));
+ SKIP_IF(!hash_mmu);
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ for (i = 0; i < NR_MAPPINGS; i++) {
+ addr = BASE_ADDRESS + (i * STRIDE);
+
+ p = mmap((void *)addr, page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
+ if (p == MAP_FAILED) {
+ perror("mmap");
+ printf("Error: couldn't mmap(), confirm kernel has 4PB support?\n");
+ return 1;
+ }
+ }
+
+ action.sa_handler = signal_handler;
+ action.sa_flags = SA_RESTART;
+ FAIL_IF(sigaction(SIGALRM, &action, NULL) < 0);
+
+ // Seen to always crash in under ~10s on affected kernels.
+ alarm(30);
+
+ while (!signaled) {
+ // Fork new processes, to increase the chance that we hit the case where
+ // the kernel stack and task struct are in different segments.
+ pid = fork();
+ if (pid == 0)
+ exit(touch_mappings());
+
+ FAIL_IF(waitpid(-1, &status, 0) == -1);
+ FAIL_IF(WIFSIGNALED(status));
+ FAIL_IF(!WIFEXITED(status));
+ FAIL_IF(WEXITSTATUS(status));
+ }
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test, "large_vm_gpr_corruption");
+}
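
Aside: a quick check of the constants used above: BASE_ADDRESS = 2^50 (1PB)
sits above the 512TB (2^49) boundary, and with a 2TB stride each mapping lands
in its own 1TB segment, so a naive 32-entry SLB faults on every one of the 64
accesses. A small sketch of that arithmetic (same constants as the test):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            const unsigned long base = 1ul << 50;   /* 1PB, matches BASE_ADDRESS */
            const unsigned long stride = 2ul << 40; /* 2TB, matches STRIDE */

            for (int i = 0; i < 64; i++) {
                    unsigned long addr = base + i * stride;

                    assert(addr >= (512ul << 40));  /* large-address region */
                    printf("mapping %2d -> 1TB segment %lu\n", i, addr >> 40);
            }
            return 0;
    }
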
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S b/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
deleted file mode 100644
index 08a7b5f133b9..000000000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2014, Michael Ellerman, IBM Corp.
- */
-
-#include <ppc-asm.h>
-
- .text
-
-FUNC_START(thirty_two_instruction_loop)
- cmpwi r3,0
- beqlr
- addi r4,r3,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1
- addi r4,r4,1 # 28 addi's
- subi r3,r3,1
- b FUNC_NAME(thirty_two_instruction_loop)
-FUNC_END(thirty_two_instruction_loop)
diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
index fca054bbc094..c01a31d5f4ee 100644
--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
+++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
@@ -274,7 +274,7 @@ u64 *get_intr_regs(struct event *event, void *sample_buff)
return intr_regs;
}
-static const unsigned int __perf_reg_mask(const char *register_name)
+static const int __perf_reg_mask(const char *register_name)
{
if (!strcmp(register_name, "R0"))
return 0;
diff --git a/tools/testing/selftests/powerpc/security/spectre_v2.c b/tools/testing/selftests/powerpc/security/spectre_v2.c
index d42ca8c676c3..5b2abb719ef2 100644
--- a/tools/testing/selftests/powerpc/security/spectre_v2.c
+++ b/tools/testing/selftests/powerpc/security/spectre_v2.c
@@ -182,17 +182,23 @@ int spectre_v2_test(void)
case COUNT_CACHE_FLUSH_HW:
// These should all not affect userspace branch prediction
if (miss_percent > 15) {
- printf("Branch misses > 15%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
- /*
- * Such a mismatch may be caused by a guest system
- * reporting as vulnerable when the host is mitigated.
- * Return skip code to avoid detecting this as an error.
- * We are not vulnerable and reporting otherwise, so
- * missing such a mismatch is safe.
- */
- if (miss_percent > 95)
+ if (miss_percent > 95) {
+ /*
+ * Such a mismatch may be caused by a system being unaware
+ * the count cache is disabled. This may be to enable
+ * guest migration between hosts with different settings.
+ * Return skip code to avoid detecting this as an error.
+ * We are not vulnerable and reporting otherwise, so
+ * missing such a mismatch is safe.
+ */
+ printf("Branch misses > 95%% unexpected in this configuration.\n");
+ printf("Count cache likely disabled without Linux knowing.\n");
+ if (state == COUNT_CACHE_FLUSH_SW)
+ printf("WARNING: Kernel performing unnecessary flushes.\n");
return 4;
+ }
+ printf("Branch misses > 15%% unexpected in this configuration!\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
@@ -201,14 +207,14 @@ int spectre_v2_test(void)
// This seems to affect userspace branch prediction a bit?
if (miss_percent > 25) {
printf("Branch misses > 25%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
break;
case COUNT_CACHE_DISABLED:
if (miss_percent < 95) {
- printf("Branch misses < 20%% unexpected in this configuration!\n");
- printf("Possible mis-match between reported & actual mitigation\n");
+ printf("Branch misses < 95%% unexpected in this configuration!\n");
+ printf("Possible mismatch between reported & actual mitigation\n");
return 1;
}
break;
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-check-branches.sh b/tools/testing/selftests/rcutorture/bin/kvm-check-branches.sh
index f17000a2ccf1..ed0ec7f0927e 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-check-branches.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-check-branches.sh
@@ -35,7 +35,7 @@ then
exit 1
fi
-# Remember where we started so that we can get back and the end.
+# Remember where we started so that we can get back at the end.
curcommit="`git status | head -1 | awk '{ print $NF }'`"
nfail=0
@@ -73,15 +73,10 @@ do
# Test the specified commit.
git checkout $i > $resdir/$ds/$idir/git-checkout.out 2>&1
echo git checkout return code: $? "(Commit $ntry: $i)"
- kvm.sh --allcpus --duration 3 --trust-make > $resdir/$ds/$idir/kvm.sh.out 2>&1
+ kvm.sh --allcpus --duration 3 --trust-make --datestamp "$ds/$idir" > $resdir/$ds/$idir/kvm.sh.out 2>&1
ret=$?
echo kvm.sh return code $ret for commit $i from branch $gitbr
-
- # Move the build products to their resting place.
- runresdir="`grep -m 1 '^Results directory:' < $resdir/$ds/$idir/kvm.sh.out | sed -e 's/^Results directory://'`"
- mv $runresdir $resdir/$ds/$idir
- rrd="`echo $runresdir | sed -e 's,^.*/,,'`"
- echo Run results: $resdir/$ds/$idir/$rrd
+ echo Run results: $resdir/$ds/$idir
if test "$ret" -ne 0
then
# Failure, so leave all evidence intact.
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
index 0ff59bd8b640..9f0a5d5ff2dd 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
@@ -262,6 +262,7 @@ echo All batches started. `date` | tee -a "$oldrun/remote-log"
# Wait for all remaining scenarios to complete and collect results.
for i in $systems
do
+ echo " ---" Waiting for $i `date` | tee -a "$oldrun/remote-log"
while checkremotefile "$i" "$resdir/$ds/remote.run"
do
sleep 30
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 263e16aeca0e..6c734818a875 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -164,7 +164,7 @@ do
shift
;;
--gdb)
- TORTURE_KCONFIG_GDB_ARG="CONFIG_DEBUG_INFO=y"; export TORTURE_KCONFIG_GDB_ARG
+ TORTURE_KCONFIG_GDB_ARG="CONFIG_DEBUG_INFO_NONE=n CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y"; export TORTURE_KCONFIG_GDB_ARG
TORTURE_BOOT_GDB_ARG="nokaslr"; export TORTURE_BOOT_GDB_ARG
TORTURE_QEMU_GDB_ARG="-s -S"; export TORTURE_QEMU_GDB_ARG
;;
@@ -180,7 +180,7 @@ do
shift
;;
--kasan)
- TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG
+ TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO_NONE=n CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG
if test -n "$torture_qemu_mem_default"
then
TORTURE_QEMU_MEM=2G
@@ -192,7 +192,7 @@ do
shift
;;
--kcsan)
- TORTURE_KCONFIG_KCSAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_STRICT=y CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"; export TORTURE_KCONFIG_KCSAN_ARG
+ TORTURE_KCONFIG_KCSAN_ARG="CONFIG_DEBUG_INFO_NONE=n CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_KCSAN=y CONFIG_KCSAN_STRICT=y CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"; export TORTURE_KCONFIG_KCSAN_ARG
;;
--kmake-arg|--kmake-args)
checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$'
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
index 6bcee2ec91a9..73d53257df42 100644
--- a/tools/testing/selftests/resctrl/Makefile
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -1,17 +1,10 @@
-CC = $(CROSS_COMPILE)gcc
-CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
-SRCS=$(wildcard *.c)
-OBJS=$(SRCS:.c=.o)
-
-all: resctrl_tests
+# SPDX-License-Identifier: GPL-2.0
-$(OBJS): $(SRCS)
- $(CC) $(CFLAGS) -c $(SRCS)
+CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2
+CFLAGS += $(KHDR_INCLUDES)
-resctrl_tests: $(OBJS)
- $(CC) $(CFLAGS) -o $@ $^
+TEST_GEN_PROGS := resctrl_tests
-.PHONY: clean
+include ../lib.mk
-clean:
- $(RM) $(OBJS) resctrl_tests
+$(OUTPUT)/resctrl_tests: $(wildcard *.c)
diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
index 3d2bbd4fa3aa..8d11ce7c2ee5 100644
--- a/tools/testing/selftests/resctrl/README
+++ b/tools/testing/selftests/resctrl/README
@@ -12,24 +12,49 @@ Allocation test on Intel RDT hardware. More tests will be added in the future.
And the test suite can be extended to cover AMD QoS and ARM MPAM hardware
as well.
+resctrl_tests can be run with or without the kselftest framework.
+
+WITH KSELFTEST FRAMEWORK
+========================
+
BUILD
-----
-Run "make" to build executable file "resctrl_tests".
+Build executable file "resctrl_tests" from top level directory of the kernel source:
+ $ make -C tools/testing/selftests TARGETS=resctrl
RUN
---
-To use resctrl_tests, root or sudoer privileges are required. This is because
-the test needs to mount resctrl file system and change contents in the file
-system.
+Run resctrl_tests as sudo or root since the test needs to mount resctrl file
+system and change contents in the file system.
+Using the kselftest framework will run all supported tests within resctrl_tests:
+
+ $ sudo make -C tools/testing/selftests TARGETS=resctrl run_tests
+
+More details about kselftest framework can be found in
+Documentation/dev-tools/kselftest.rst.
+
+WITHOUT KSELFTEST FRAMEWORK
+===========================
+
+BUILD
+-----
+
+Build executable file "resctrl_tests" from this directory(tools/testing/selftests/resctrl/):
+ $ make
+
+RUN
+---
+Run resctrl_tests as sudo or root since the test needs to mount resctrl file
+system and change contents in the file system.
Executing the test without any parameter will run all supported tests:
- sudo ./resctrl_tests
+ $ sudo ./resctrl_tests
OVERVIEW OF EXECUTION
----------------------
+=====================
A test case has four stages:
@@ -41,7 +66,7 @@ A test case has four stages:
- teardown: umount resctrl and clear temporary files.
ARGUMENTS
----------
+=========
Parameter '-h' shows usage information.
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index cd4f68388e0f..1c5e90c63254 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -89,7 +89,7 @@ static int check_results(struct resctrl_val_param *param)
return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
- !is_amd, false);
+ get_vendor() == ARCH_INTEL, false);
}
void cat_test_cleanup(void)
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
index 51e5cf22632f..56ccbeae0638 100644
--- a/tools/testing/selftests/resctrl/fill_buf.c
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -121,8 +121,10 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
/* Consume read result so that reading memory is not optimized out. */
fp = fopen("/dev/null", "w");
- if (!fp)
+ if (!fp) {
perror("Unable to write to /dev/null");
+ return -1;
+ }
fprintf(fp, "Sum: %d ", ret);
fclose(fp);
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 1ad10c47e31d..f0ded31fb3c7 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -34,6 +34,9 @@
#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"
+#define ARCH_INTEL 1
+#define ARCH_AMD 2
+
#define PARENT_EXIT(err_msg) \
do { \
perror(err_msg); \
@@ -75,8 +78,8 @@ struct resctrl_val_param {
extern pid_t bm_pid, ppid;
extern char llc_occup_path[1024];
-extern bool is_amd;
+int get_vendor(void);
bool check_resctrlfs_support(void);
int filter_dmesg(void);
int remount_resctrlfs(bool mum_resctrlfs);
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
index 973f09a66e1e..df0d8d8526fc 100644
--- a/tools/testing/selftests/resctrl/resctrl_tests.c
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -13,25 +13,41 @@
#define BENCHMARK_ARGS 64
#define BENCHMARK_ARG_SIZE 64
-bool is_amd;
-
-void detect_amd(void)
+static int detect_vendor(void)
{
FILE *inf = fopen("/proc/cpuinfo", "r");
+ int vendor_id = 0;
+ char *s = NULL;
char *res;
if (!inf)
- return;
+ return vendor_id;
res = fgrep(inf, "vendor_id");
- if (res) {
- char *s = strchr(res, ':');
+ if (res)
+ s = strchr(res, ':');
+
+ if (s && !strcmp(s, ": GenuineIntel\n"))
+ vendor_id = ARCH_INTEL;
+ else if (s && !strcmp(s, ": AuthenticAMD\n"))
+ vendor_id = ARCH_AMD;
- is_amd = s && !strcmp(s, ": AuthenticAMD\n");
- free(res);
- }
fclose(inf);
+ free(res);
+ return vendor_id;
+}
+
+int get_vendor(void)
+{
+ static int vendor = -1;
+
+ if (vendor == -1)
+ vendor = detect_vendor();
+ if (vendor == 0)
+ ksft_print_msg("Can not get vendor info...\n");
+
+ return vendor;
}
static void cmd_help(void)
@@ -70,6 +86,8 @@ static void run_mbm_test(bool has_ben, char **benchmark_cmd, int span,
sprintf(benchmark_cmd[5], "%s", MBA_STR);
res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
ksft_test_result(!res, "MBM: bw change\n");
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
mbm_test_cleanup();
}
@@ -106,6 +124,8 @@ static void run_cmt_test(bool has_ben, char **benchmark_cmd, int cpu_no)
sprintf(benchmark_cmd[5], "%s", CMT_STR);
res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
ksft_test_result(!res, "CMT: test\n");
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
cmt_test_cleanup();
}
@@ -205,10 +225,7 @@ int main(int argc, char **argv)
* 2. We execute perf commands
*/
if (geteuid() != 0)
- return ksft_exit_fail_msg("Not running as root, abort testing.\n");
-
- /* Detect AMD vendor */
- detect_amd();
+ return ksft_exit_skip("Not running as root. Skipping...\n");
if (has_ben) {
/* Extract benchmark command from command line. */
@@ -235,16 +252,16 @@ int main(int argc, char **argv)
sprintf(bm_type, "fill_buf");
if (!check_resctrlfs_support())
- return ksft_exit_fail_msg("resctrl FS does not exist\n");
+ return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
filter_dmesg();
ksft_set_plan(tests ? : 4);
- if (!is_amd && mbm_test)
+ if ((get_vendor() == ARCH_INTEL) && mbm_test)
run_mbm_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
- if (!is_amd && mba_test)
+ if ((get_vendor() == ARCH_INTEL) && mba_test)
run_mba_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
if (cmt_test)
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index 95224345c78e..b32b96356ec7 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -678,6 +678,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
sigemptyset(&sigact.sa_mask);
sigact.sa_flags = SA_SIGINFO;
if (sigaction(SIGINT, &sigact, NULL) ||
+ sigaction(SIGTERM, &sigact, NULL) ||
sigaction(SIGHUP, &sigact, NULL)) {
perror("# sigaction");
ret = errno;
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index 5f5a166ade60..6f543e470ad4 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -106,7 +106,7 @@ int get_resource_id(int cpu_no, int *resource_id)
char phys_pkg_path[1024];
FILE *fp;
- if (is_amd)
+ if (get_vendor() == ARCH_AMD)
sprintf(phys_pkg_path, "%s%d/cache/index3/id",
PHYS_ID_PATH, cpu_no);
else
diff --git a/tools/testing/selftests/resctrl/settings b/tools/testing/selftests/resctrl/settings
new file mode 100644
index 000000000000..a383f3d4565b
--- /dev/null
+++ b/tools/testing/selftests/resctrl/settings
@@ -0,0 +1,3 @@
+# If running time is longer than 120 seconds when new tests are added in
+# the future, increase timeout here.
+timeout=120
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
index b86642f90d7f..3a391c9bf468 100644
--- a/tools/testing/selftests/rseq/rseq-riscv.h
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
@@ -86,7 +86,7 @@ do { \
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
RSEQ_INJECT_ASM(1) \
- "la "RSEQ_ASM_TMP_REG_1 ", " __rseq_str(cs_label) "\n" \
+ "la " RSEQ_ASM_TMP_REG_1 ", " __rseq_str(cs_label) "\n" \
REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(rseq_cs) "]\n" \
__rseq_str(label) ":\n"
@@ -103,17 +103,17 @@ do { \
#define RSEQ_ASM_OP_CMPEQ(var, expect, label) \
REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
- "bne "RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "] ," \
+ "bne " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "] ," \
__rseq_str(label) "\n"
#define RSEQ_ASM_OP_CMPEQ32(var, expect, label) \
- "lw "RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
- "bne "RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "] ," \
+ "lw " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
+ "bne " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "] ," \
__rseq_str(label) "\n"
#define RSEQ_ASM_OP_CMPNE(var, expect, label) \
REG_L RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n" \
- "beq "RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "] ," \
+ "beq " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "] ," \
__rseq_str(label) "\n"
#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
@@ -127,12 +127,12 @@ do { \
REG_S RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"
#define RSEQ_ASM_OP_R_LOAD_OFF(offset) \
- "add "RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(offset) "], " \
+ "add " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(offset) "], " \
RSEQ_ASM_TMP_REG_1 "\n" \
REG_L RSEQ_ASM_TMP_REG_1 ", (" RSEQ_ASM_TMP_REG_1 ")\n"
#define RSEQ_ASM_OP_R_ADD(count) \
- "add "RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1 \
+ "add " RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1 \
", %[" __rseq_str(count) "]\n"
#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label) \
@@ -194,8 +194,8 @@ int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[v] "m" (*v),
[expect] "r" (expect),
[newv] "r" (newv)
@@ -251,8 +251,8 @@ int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[v] "m" (*v),
[expectnot] "r" (expectnot),
[load] "m" (*load),
@@ -301,8 +301,8 @@ int rseq_addv(intptr_t *v, intptr_t count, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[v] "m" (*v),
[count] "r" (count)
RSEQ_INJECT_INPUT
@@ -352,8 +352,8 @@ int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[expect] "r" (expect),
[v] "m" (*v),
[newv] "r" (newv),
@@ -411,8 +411,8 @@ int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[expect] "r" (expect),
[v] "m" (*v),
[newv] "r" (newv),
@@ -472,8 +472,8 @@ int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[v] "m" (*v),
[expect] "r" (expect),
[v2] "m" (*v2),
@@ -532,8 +532,8 @@ int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[expect] "r" (expect),
[v] "m" (*v),
[newv] "r" (newv),
@@ -593,8 +593,8 @@ int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[expect] "r" (expect),
[v] "m" (*v),
[newv] "r" (newv),
@@ -651,8 +651,8 @@ int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu)
RSEQ_ASM_DEFINE_ABORT(4, abort)
: /* gcc asm goto does not allow outputs */
: [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (__rseq_abi.cpu_id),
- [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
[ptr] "r" (ptr),
[off] "er" (off),
[inc] "er" (inc)
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index 986b9458efb2..4177f9507bbe 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -111,7 +111,8 @@ void rseq_init(void)
libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset");
libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size");
libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags");
- if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) {
+ if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p &&
+ *libc_rseq_size_p != 0) {
/* rseq registration owned by glibc */
rseq_offset = *libc_rseq_offset_p;
rseq_size = *libc_rseq_size_p;
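
Aside: recent glibc exports __rseq_offset/__rseq_size/__rseq_flags even when it
skipped rseq registration (e.g. with the glibc.pthread.rseq=0 tunable), in which
case __rseq_size reads 0; hence the extra check above. A standalone probe
sketching the same test (assumes a glibc that provides those symbols):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int *size_p = dlsym(RTLD_DEFAULT, "__rseq_size");

            /* Non-NULL and non-zero only when glibc registered rseq itself. */
            if (size_p && *size_p != 0)
                    printf("glibc owns rseq registration (size %u)\n", *size_p);
            else
                    printf("no glibc rseq registration\n");
            return 0;
    }
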
diff --git a/tools/testing/selftests/safesetid/Makefile b/tools/testing/selftests/safesetid/Makefile
index fa02c4d5ec13..e815bbf2d0f4 100644
--- a/tools/testing/selftests/safesetid/Makefile
+++ b/tools/testing/selftests/safesetid/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for mount selftests.
+# Makefile for SafeSetID selftest.
CFLAGS = -Wall -O2
LDLIBS = -lcap
diff --git a/tools/testing/selftests/safesetid/safesetid-test.c b/tools/testing/selftests/safesetid/safesetid-test.c
index 4b809c93ba36..eb9bf0aee951 100644
--- a/tools/testing/selftests/safesetid/safesetid-test.c
+++ b/tools/testing/selftests/safesetid/safesetid-test.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <errno.h>
#include <pwd.h>
+#include <grp.h>
#include <string.h>
#include <syscall.h>
#include <sys/capability.h>
@@ -16,17 +17,28 @@
#include <stdbool.h>
#include <stdarg.h>
+/*
+ * NOTES about this test:
+ * - requires libcap-dev to be installed on test system
+ * - requires securityfs to be mounted at /sys/kernel/security, e.g.:
+ * mount -n -t securityfs -o nodev,noexec,nosuid securityfs /sys/kernel/security
+ * - needs CONFIG_SECURITYFS and CONFIG_SAFESETID to be enabled
+ */
+
#ifndef CLONE_NEWUSER
# define CLONE_NEWUSER 0x10000000
#endif
-#define ROOT_USER 0
-#define RESTRICTED_PARENT 1
-#define ALLOWED_CHILD1 2
-#define ALLOWED_CHILD2 3
-#define NO_POLICY_USER 4
+#define ROOT_UGID 0
+#define RESTRICTED_PARENT_UGID 1
+#define ALLOWED_CHILD1_UGID 2
+#define ALLOWED_CHILD2_UGID 3
+#define NO_POLICY_UGID 4
+
+#define UGID_POLICY_STRING "1:2\n1:3\n2:2\n3:3\n"
-char* add_whitelist_policy_file = "/sys/kernel/security/safesetid/add_whitelist_policy";
+char* add_uid_whitelist_policy_file = "/sys/kernel/security/safesetid/uid_allowlist_policy";
+char* add_gid_whitelist_policy_file = "/sys/kernel/security/safesetid/gid_allowlist_policy";
static void die(char *fmt, ...)
{
@@ -106,9 +118,10 @@ static void ensure_user_exists(uid_t uid)
die("couldn't open file\n");
if (fseek(fd, 0, SEEK_END))
die("couldn't fseek\n");
- snprintf(name_str, 10, "%d", uid);
+ snprintf(name_str, 10, "user %d", uid);
p.pw_name=name_str;
p.pw_uid=uid;
+ p.pw_gid=uid;
p.pw_gecos="Test account";
p.pw_dir="/dev/null";
p.pw_shell="/bin/false";
@@ -120,9 +133,36 @@ static void ensure_user_exists(uid_t uid)
}
}
+static void ensure_group_exists(gid_t gid)
+{
+ struct group g;
+
+ FILE *fd;
+ char name_str[10];
+
+ if (getgrgid(gid) == NULL) {
+ memset(&g,0x00,sizeof(g));
+ fd=fopen("/etc/group","a");
+ if (fd == NULL)
+ die("couldn't open group file\n");
+ if (fseek(fd, 0, SEEK_END))
+ die("couldn't fseek group file\n");
+ snprintf(name_str, 10, "group %d", gid);
+ g.gr_name=name_str;
+ g.gr_gid=gid;
+ g.gr_passwd=NULL;
+ g.gr_mem=NULL;
+ int value = putgrent(&g,fd);
+ if (value != 0)
+ die("putgrent failed\n");
+ if (fclose(fd))
+ die("fclose failed\n");
+ }
+}
+
static void ensure_securityfs_mounted(void)
{
- int fd = open(add_whitelist_policy_file, O_WRONLY);
+ int fd = open(add_uid_whitelist_policy_file, O_WRONLY);
if (fd < 0) {
if (errno == ENOENT) {
// Need to mount securityfs
@@ -135,39 +175,60 @@ static void ensure_securityfs_mounted(void)
} else {
if (close(fd) != 0) {
die("close of %s failed: %s\n",
- add_whitelist_policy_file, strerror(errno));
+ add_uid_whitelist_policy_file, strerror(errno));
+ }
+ }
+}
+
+static void write_uid_policies()
+{
+ static char *policy_str = UGID_POLICY_STRING;
+ ssize_t written;
+ int fd;
+
+ fd = open(add_uid_whitelist_policy_file, O_WRONLY);
+ if (fd < 0)
+ die("can't open add_uid_whitelist_policy file\n");
+ written = write(fd, policy_str, strlen(policy_str));
+ if (written != strlen(policy_str)) {
+ if (written >= 0) {
+ die("short write to %s\n", add_uid_whitelist_policy_file);
+ } else {
+ die("write to %s failed: %s\n",
+ add_uid_whitelist_policy_file, strerror(errno));
}
}
+ if (close(fd) != 0) {
+ die("close of %s failed: %s\n",
+ add_uid_whitelist_policy_file, strerror(errno));
+ }
}
-static void write_policies(void)
+static void write_gid_policies()
{
- static char *policy_str =
- "1:2\n"
- "1:3\n"
- "2:2\n"
- "3:3\n";
+ static char *policy_str = UGID_POLICY_STRING;
ssize_t written;
int fd;
- fd = open(add_whitelist_policy_file, O_WRONLY);
+ fd = open(add_gid_whitelist_policy_file, O_WRONLY);
if (fd < 0)
- die("can't open add_whitelist_policy file\n");
+ die("can't open add_gid_whitelist_policy file\n");
written = write(fd, policy_str, strlen(policy_str));
if (written != strlen(policy_str)) {
if (written >= 0) {
- die("short write to %s\n", add_whitelist_policy_file);
+ die("short write to %s\n", add_gid_whitelist_policy_file);
} else {
die("write to %s failed: %s\n",
- add_whitelist_policy_file, strerror(errno));
+ add_gid_whitelist_policy_file, strerror(errno));
}
}
if (close(fd) != 0) {
die("close of %s failed: %s\n",
- add_whitelist_policy_file, strerror(errno));
+ add_gid_whitelist_policy_file, strerror(errno));
}
}
+
static bool test_userns(bool expect_success)
{
uid_t uid;
@@ -194,7 +255,7 @@ static bool test_userns(bool expect_success)
printf("preparing file name string failed");
return false;
}
- success = write_file(map_file_name, "0 0 1", uid);
+ success = write_file(map_file_name, "0 %d 1", uid);
return success == expect_success;
}
@@ -258,13 +319,144 @@ static void test_setuid(uid_t child_uid, bool expect_success)
die("should not reach here\n");
}
+static void test_setgid(gid_t child_gid, bool expect_success)
+{
+ pid_t cpid, w;
+ int wstatus;
+
+ cpid = fork();
+ if (cpid == -1) {
+ die("fork\n");
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ if (setgid(child_gid) < 0)
+ exit(EXIT_FAILURE);
+ if (getgid() == child_gid)
+ exit(EXIT_SUCCESS);
+ else
+ exit(EXIT_FAILURE);
+ } else { /* Code executed by parent */
+ do {
+ w = waitpid(cpid, &wstatus, WUNTRACED | WCONTINUED);
+ if (w == -1) {
+ die("waitpid\n");
+ }
+
+ if (WIFEXITED(wstatus)) {
+ if (WEXITSTATUS(wstatus) == EXIT_SUCCESS) {
+ if (expect_success) {
+ return;
+ } else {
+ die("unexpected success\n");
+ }
+ } else {
+ if (expect_success) {
+ die("unexpected failure\n");
+ } else {
+ return;
+ }
+ }
+ } else if (WIFSIGNALED(wstatus)) {
+ if (WTERMSIG(wstatus) == 9) {
+ if (expect_success)
+ die("killed unexpectedly\n");
+ else
+ return;
+ } else {
+ die("unexpected signal: %d\n", wstatus);
+ }
+ } else {
+ die("unexpected status: %d\n", wstatus);
+ }
+ } while (!WIFEXITED(wstatus) && !WIFSIGNALED(wstatus));
+ }
+
+ die("should not reach here\n");
+}
+
+static void test_setgroups(gid_t* child_groups, size_t len, bool expect_success)
+{
+ pid_t cpid, w;
+ int wstatus;
+ gid_t groupset[len];
+ int i, j;
+
+ cpid = fork();
+ if (cpid == -1) {
+ die("fork\n");
+ }
+
+ if (cpid == 0) { /* Code executed by child */
+ if (setgroups(len, child_groups) != 0)
+ exit(EXIT_FAILURE);
+ if (getgroups(len, groupset) != len)
+ exit(EXIT_FAILURE);
+ for (i = 0; i < len; i++) {
+ for (j = 0; j < len; j++) {
+ if (child_groups[i] == groupset[j])
+ break;
+ if (j == len - 1)
+ exit(EXIT_FAILURE);
+ }
+ }
+ exit(EXIT_SUCCESS);
+ } else { /* Code executed by parent */
+ do {
+ w = waitpid(cpid, &wstatus, WUNTRACED | WCONTINUED);
+ if (w == -1) {
+ die("waitpid\n");
+ }
+
+ if (WIFEXITED(wstatus)) {
+ if (WEXITSTATUS(wstatus) == EXIT_SUCCESS) {
+ if (expect_success) {
+ return;
+ } else {
+ die("unexpected success\n");
+ }
+ } else {
+ if (expect_success) {
+ die("unexpected failure\n");
+ } else {
+ return;
+ }
+ }
+ } else if (WIFSIGNALED(wstatus)) {
+ if (WTERMSIG(wstatus) == 9) {
+ if (expect_success)
+ die("killed unexpectedly\n");
+ else
+ return;
+ } else {
+ die("unexpected signal: %d\n", wstatus);
+ }
+ } else {
+ die("unexpected status: %d\n", wstatus);
+ }
+ } while (!WIFEXITED(wstatus) && !WIFSIGNALED(wstatus));
+ }
+
+ die("should not reach here\n");
+}
+
+
static void ensure_users_exist(void)
{
- ensure_user_exists(ROOT_USER);
- ensure_user_exists(RESTRICTED_PARENT);
- ensure_user_exists(ALLOWED_CHILD1);
- ensure_user_exists(ALLOWED_CHILD2);
- ensure_user_exists(NO_POLICY_USER);
+ ensure_user_exists(ROOT_UGID);
+ ensure_user_exists(RESTRICTED_PARENT_UGID);
+ ensure_user_exists(ALLOWED_CHILD1_UGID);
+ ensure_user_exists(ALLOWED_CHILD2_UGID);
+ ensure_user_exists(NO_POLICY_UGID);
+}
+
+static void ensure_groups_exist(void)
+{
+ ensure_group_exists(ROOT_UGID);
+ ensure_group_exists(RESTRICTED_PARENT_UGID);
+ ensure_group_exists(ALLOWED_CHILD1_UGID);
+ ensure_group_exists(ALLOWED_CHILD2_UGID);
+ ensure_group_exists(NO_POLICY_UGID);
}
static void drop_caps(bool setid_retained)
@@ -283,41 +475,52 @@ static void drop_caps(bool setid_retained)
int main(int argc, char **argv)
{
+ ensure_groups_exist();
ensure_users_exist();
ensure_securityfs_mounted();
- write_policies();
+ write_uid_policies();
+ write_gid_policies();
if (prctl(PR_SET_KEEPCAPS, 1L))
die("Error with set keepcaps\n");
- // First test to make sure we can write userns mappings from a user
- // that doesn't have any restrictions (as long as it has CAP_SETUID);
- if (setuid(NO_POLICY_USER) < 0)
- die("Error with set uid(%d)\n", NO_POLICY_USER);
- if (setgid(NO_POLICY_USER) < 0)
- die("Error with set gid(%d)\n", NO_POLICY_USER);
-
+ // First test to make sure we can write userns mappings from a non-root
+ // user that doesn't have any restrictions (as long as it has
+ // CAP_SETUID);
+ if (setgid(NO_POLICY_UGID) < 0)
+ die("Error with set gid(%d)\n", NO_POLICY_UGID);
+ if (setuid(NO_POLICY_UGID) < 0)
+ die("Error with set uid(%d)\n", NO_POLICY_UGID);
// Take away all but setid caps
drop_caps(true);
-
// Need PR_SET_DUMPABLE flag set so we can write /proc/[pid]/uid_map
// from non-root parent process.
if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0))
die("Error with set dumpable\n");
-
if (!test_userns(true)) {
die("test_userns failed when it should work\n");
}
- if (setuid(RESTRICTED_PARENT) < 0)
- die("Error with set uid(%d)\n", RESTRICTED_PARENT);
- if (setgid(RESTRICTED_PARENT) < 0)
- die("Error with set gid(%d)\n", RESTRICTED_PARENT);
+ // Now switch to a user/group with restrictions
+ if (setgid(RESTRICTED_PARENT_UGID) < 0)
+ die("Error with set gid(%d)\n", RESTRICTED_PARENT_UGID);
+ if (setuid(RESTRICTED_PARENT_UGID) < 0)
+ die("Error with set uid(%d)\n", RESTRICTED_PARENT_UGID);
+
+ test_setuid(ROOT_UGID, false);
+ test_setuid(ALLOWED_CHILD1_UGID, true);
+ test_setuid(ALLOWED_CHILD2_UGID, true);
+ test_setuid(NO_POLICY_UGID, false);
+
+ test_setgid(ROOT_UGID, false);
+ test_setgid(ALLOWED_CHILD1_UGID, true);
+ test_setgid(ALLOWED_CHILD2_UGID, true);
+ test_setgid(NO_POLICY_UGID, false);
- test_setuid(ROOT_USER, false);
- test_setuid(ALLOWED_CHILD1, true);
- test_setuid(ALLOWED_CHILD2, true);
- test_setuid(NO_POLICY_USER, false);
+ gid_t allowed_supp_groups[2] = {ALLOWED_CHILD1_UGID, ALLOWED_CHILD2_UGID};
+ gid_t disallowed_supp_groups[2] = {ROOT_UGID, NO_POLICY_UGID};
+ test_setgroups(allowed_supp_groups, 2, true);
+ test_setgroups(disallowed_supp_groups, 2, false);
if (!test_userns(false)) {
die("test_userns worked when it should fail\n");
@@ -328,8 +531,12 @@ int main(int argc, char **argv)
test_setuid(2, false);
test_setuid(3, false);
test_setuid(4, false);
+ test_setgid(2, false);
+ test_setgid(3, false);
+ test_setgid(4, false);
// NOTE: this test doesn't clean up users that were created in
// /etc/passwd or flush policies that were added to the LSM.
+ printf("test successful!\n");
return EXIT_SUCCESS;
}
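
Aside: each allowlist line is a "parent:child" ID pair, so UGID_POLICY_STRING
above allows ID 1 to transition to 2 or 3 (and 2 and 3 to retain themselves),
with everything else denied once a policy exists for the caller. A minimal
sketch of installing a single-rule UID policy through the same securityfs file:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *rule = "1:2\n"; /* allow UID 1 -> UID 2 only */
            int fd = open("/sys/kernel/security/safesetid/uid_allowlist_policy",
                          O_WRONLY);

            if (fd < 0 || write(fd, rule, strlen(rule)) != (ssize_t)strlen(rule)) {
                    perror("safesetid policy");
                    return 1;
            }
            return close(fd) ? 1 : 0;
    }
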
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 136df5b76319..4ae6c8991307 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -809,7 +809,7 @@ void kill_thread_or_group(struct __test_metadata *_metadata,
.len = (unsigned short)ARRAY_SIZE(filter_thread),
.filter = filter_thread,
};
- int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAAA;
+ int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAA;
struct sock_filter filter_process[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
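
Aside: the old 0xAAAAAAAAA literal is nine hex digits (36 bits), so storing it
in a 32-bit int silently drops the top nibble; the fix trims it to the 32-bit
value that was actually intended as a bogus return action. Illustration:

    #include <stdio.h>

    int main(void)
    {
            /* Nine hex digits: the literal is wider than int. */
            int truncated = (int)0xAAAAAAAAAULL;
            int intended = (int)0xAAAAAAAA;

            /* On typical two's-complement targets both print aaaaaaaa. */
            printf("%x %x\n", truncated, intended);
            return 0;
    }
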
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
index 47ff5afc3727..64c60f38b446 100644
--- a/tools/testing/selftests/sync/config
+++ b/tools/testing/selftests/sync/config
@@ -1,3 +1,2 @@
CONFIG_STAGING=y
-CONFIG_ANDROID=y
CONFIG_SW_SYNC=y
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index 19515dcb7d04..f50778a3d744 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -40,6 +40,7 @@ ALL_TESTS="$ALL_TESTS 0004:1:1:uint_0001"
ALL_TESTS="$ALL_TESTS 0005:3:1:int_0003"
ALL_TESTS="$ALL_TESTS 0006:50:1:bitmap_0001"
ALL_TESTS="$ALL_TESTS 0007:1:1:boot_int"
+ALL_TESTS="$ALL_TESTS 0008:1:1:match_int"
function allow_user_defaults()
{
@@ -785,6 +786,27 @@ sysctl_test_0007()
return $ksft_skip
}
+sysctl_test_0008()
+{
+ TARGET="${SYSCTL}/match_int"
+ if [ ! -f $TARGET ]; then
+ echo "Skipping test for $TARGET as it is not present ..."
+ return $ksft_skip
+ fi
+
+ echo -n "Testing if $TARGET is matched in kernel"
+ ORIG_VALUE=$(cat "${TARGET}")
+
+ if [ $ORIG_VALUE -ne 1 ]; then
+ echo "TEST FAILED"
+ rc=1
+ test_rc
+ fi
+
+ echo "ok"
+ return 0
+}
+
list_tests()
{
echo "Test ID list:"
@@ -800,6 +822,7 @@ list_tests()
echo "0005 x $(get_test_count 0005) - tests proc_douintvec() array"
echo "0006 x $(get_test_count 0006) - tests proc_do_large_bitmap()"
echo "0007 x $(get_test_count 0007) - tests setting sysctl from kernel boot param"
+ echo "0008 x $(get_test_count 0008) - tests sysctl macro values match"
}
usage()
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index d52f65de23b4..9fe1cef72728 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
__pycache__/
*.pyc
-plugins/
*.xml
*.tap
tdc_config_local.py
diff --git a/tools/testing/selftests/tc-testing/Makefile b/tools/testing/selftests/tc-testing/Makefile
index 4d639279f41e..cb553eac9f41 100644
--- a/tools/testing/selftests/tc-testing/Makefile
+++ b/tools/testing/selftests/tc-testing/Makefile
@@ -5,7 +5,6 @@ top_srcdir = $(abspath ../../../..)
APIDIR := $(top_scrdir)/include/uapi
TEST_GEN_FILES = action.o
-KSFT_KHDR_INSTALL := 1
include ../lib.mk
PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index b24494c6f546..c652e8c1157d 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -609,5 +609,82 @@
"teardown": [
"$TC actions flush action gact"
]
+ },
+ {
+ "id": "7f52",
+ "name": "Try to flush action which is referenced by filter",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC qdisc add dev $DEV1 ingress",
+ "$TC actions add action pass index 1",
+ "$TC filter add dev $DEV1 protocol all ingress prio 1 handle 0x1234 matchall action gact index 1"
+ ],
+ "cmdUnderTest": "$TC actions flush action gact",
+ "expExitCode": "1",
+ "verifyCmd": "$TC actions ls action gact",
+ "matchPattern": "total acts 1.*action order [0-9]*: gact action pass.*index 1 ref 2 bind 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress",
+ [
+ "sleep 1; $TC actions flush action gact",
+ 0,
+ 1
+ ]
+ ]
+ },
+ {
+ "id": "ae1e",
+ "name": "Try to flush actions when last one is referenced by filter",
+ "category": [
+ "actions",
+ "gact"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ [
+ "$TC actions flush action gact",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC qdisc add dev $DEV1 ingress",
+ [
+ "$TC actions add action pass index 1",
+ 0,
+ 1,
+ 255
+ ],
+ "$TC actions add action reclassify index 2",
+ "$TC actions add action drop index 3",
+ "$TC filter add dev $DEV1 protocol all ingress prio 1 handle 0x1234 matchall action gact index 3"
+ ],
+ "cmdUnderTest": "$TC actions flush action gact",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions ls action gact",
+ "matchPattern": "total acts 1.*action order [0-9]*: gact action drop.*index 3 ref 2 bind 1",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV1 ingress",
+ [
+ "sleep 1; $TC actions flush action gact",
+ 0,
+ 1
+ ]
+ ]
}
]
diff --git a/tools/testing/selftests/timens/Makefile b/tools/testing/selftests/timens/Makefile
index 3a5936cc10ab..f0d51d4d2c87 100644
--- a/tools/testing/selftests/timens/Makefile
+++ b/tools/testing/selftests/timens/Makefile
@@ -1,4 +1,4 @@
-TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex
+TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec futex vfork_exec
TEST_GEN_PROGS_EXTENDED := gettime_perf
CFLAGS := -Wall -Werror -pthread
diff --git a/tools/testing/selftests/timens/vfork_exec.c b/tools/testing/selftests/timens/vfork_exec.c
new file mode 100644
index 000000000000..e6ccd900f30a
--- /dev/null
+++ b/tools/testing/selftests/timens/vfork_exec.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "log.h"
+#include "timens.h"
+
+#define OFFSET (36000)
+
+int main(int argc, char *argv[])
+{
+ struct timespec now, tst;
+ int status, i;
+ pid_t pid;
+
+ if (argc > 1) {
+ if (sscanf(argv[1], "%ld", &now.tv_sec) != 1)
+ return pr_perror("sscanf");
+
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+ if (abs(tst.tv_sec - now.tv_sec) > 5)
+ return pr_fail("%ld %ld\n", now.tv_sec, tst.tv_sec);
+ }
+ return 0;
+ }
+
+ nscheck();
+
+ ksft_set_plan(1);
+
+ clock_gettime(CLOCK_MONOTONIC, &now);
+
+ if (unshare_timens())
+ return 1;
+
+ if (_settime(CLOCK_MONOTONIC, OFFSET))
+ return 1;
+
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+ if (abs(tst.tv_sec - now.tv_sec) > 5)
+ return pr_fail("%ld %ld\n",
+ now.tv_sec, tst.tv_sec);
+ }
+
+ pid = vfork();
+ if (pid < 0)
+ return pr_perror("fork");
+
+ if (pid == 0) {
+ char now_str[64];
+ char *cargv[] = {"exec", now_str, NULL};
+ char *cenv[] = {NULL};
+
+ // Check that we are still in the source timens.
+ for (i = 0; i < 2; i++) {
+ _gettime(CLOCK_MONOTONIC, &tst, i);
+ if (abs(tst.tv_sec - now.tv_sec) > 5)
+ return pr_fail("%ld %ld\n",
+ now.tv_sec, tst.tv_sec);
+ }
+
+ /* Check for proper vvar offsets after execve. */
+ snprintf(now_str, sizeof(now_str), "%ld", now.tv_sec + OFFSET);
+ execve("/proc/self/exe", cargv, cenv);
+ return pr_perror("execve");
+ }
+
+ if (waitpid(pid, &status, 0) != pid)
+ return pr_perror("waitpid");
+
+ if (status)
+ ksft_exit_fail();
+
+ ksft_test_result_pass("exec\n");
+ ksft_exit_pass();
+ return 0;
+}
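
Aside: the helpers in timens.h wrap the kernel's /proc/<pid>/timens_offsets
interface; offsets must be written after unshare(CLONE_NEWTIME) and before any
child enters the new namespace. A minimal sketch of setting the same 36000s
monotonic offset directly (assumes a libc that defines CLONE_NEWTIME):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char *off = "monotonic 36000 0\n"; /* <clock> <secs> <nsecs> */
            int fd;

            if (unshare(CLONE_NEWTIME)) {
                    perror("unshare");
                    return 1;
            }

            /* Writable until the first child enters the new namespace. */
            fd = open("/proc/self/timens_offsets", O_WRONLY);
            if (fd < 0 || write(fd, off, strlen(off)) != (ssize_t)strlen(off)) {
                    perror("timens_offsets");
                    return 1;
            }
            close(fd);

            /* A child created now (fork/vfork) sees CLOCK_MONOTONIC +36000s. */
            return 0;
    }
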
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
index 54d8d87f36b3..47e05fdc32c5 100644
--- a/tools/testing/selftests/timers/adjtick.c
+++ b/tools/testing/selftests/timers/adjtick.c
@@ -165,7 +165,7 @@ int check_tick_adj(long tickval)
return 0;
}
-int main(int argv, char **argc)
+int main(int argc, char **argv)
{
struct timespec raw;
long tick, max, interval, err;
diff --git a/tools/testing/selftests/timers/alarmtimer-suspend.c b/tools/testing/selftests/timers/alarmtimer-suspend.c
index 54da4b088f4c..4332b494103d 100644
--- a/tools/testing/selftests/timers/alarmtimer-suspend.c
+++ b/tools/testing/selftests/timers/alarmtimer-suspend.c
@@ -92,7 +92,7 @@ long long timespec_sub(struct timespec a, struct timespec b)
return ret;
}
-int final_ret = 0;
+int final_ret;
void sigalarm(int signo)
{
diff --git a/tools/testing/selftests/timers/change_skew.c b/tools/testing/selftests/timers/change_skew.c
index c4eab7124990..992a77f2a74c 100644
--- a/tools/testing/selftests/timers/change_skew.c
+++ b/tools/testing/selftests/timers/change_skew.c
@@ -55,7 +55,7 @@ int change_skew_test(int ppm)
}
-int main(int argv, char **argc)
+int main(int argc, char **argv)
{
struct timex tx;
int i, ret;
diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c
index ef8eb3604595..c5264594064c 100644
--- a/tools/testing/selftests/timers/clocksource-switch.c
+++ b/tools/testing/selftests/timers/clocksource-switch.c
@@ -23,17 +23,17 @@
*/
+#include <fcntl.h>
#include <stdio.h>
-#include <unistd.h>
#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
#include <sys/time.h>
#include <sys/timex.h>
-#include <time.h>
#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
#include "../kselftest.h"
@@ -110,21 +110,40 @@ int run_tests(int secs)
sprintf(buf, "./inconsistency-check -t %i", secs);
ret = system(buf);
- if (ret)
- return ret;
+ if (WIFEXITED(ret) && WEXITSTATUS(ret))
+ return WEXITSTATUS(ret);
ret = system("./nanosleep");
- return ret;
+ return WIFEXITED(ret) ? WEXITSTATUS(ret) : 0;
}
char clocksource_list[10][30];
-int main(int argv, char **argc)
+int main(int argc, char **argv)
{
char orig_clk[512];
- int count, i, status;
+ int count, i, status, opt;
+ int do_sanity_check = 1;
+ int runtime = 60;
pid_t pid;
+ /* Process arguments */
+ while ((opt = getopt(argc, argv, "st:")) != -1) {
+ switch (opt) {
+ case 's':
+ do_sanity_check = 0;
+ break;
+ case 't':
+ runtime = atoi(optarg);
+ break;
+ default:
+ printf("Usage: %s [-s] [-t <secs>]\n", argv[0]);
+ printf(" -s: skip sanity checks\n");
+ printf(" -t: Number of seconds to run\n");
+ exit(-1);
+ }
+ }
+
get_cur_clocksource(orig_clk, 512);
count = get_clocksources(clocksource_list);
@@ -135,23 +154,25 @@ int main(int argv, char **argc)
}
/* Check everything is sane before we start switching asynchronously */
- for (i = 0; i < count; i++) {
- printf("Validating clocksource %s\n", clocksource_list[i]);
- if (change_clocksource(clocksource_list[i])) {
- status = -1;
- goto out;
- }
- if (run_tests(5)) {
- status = -1;
- goto out;
+ if (do_sanity_check) {
+ for (i = 0; i < count; i++) {
+ printf("Validating clocksource %s\n",
+ clocksource_list[i]);
+ if (change_clocksource(clocksource_list[i])) {
+ status = -1;
+ goto out;
+ }
+ if (run_tests(5)) {
+ status = -1;
+ goto out;
+ }
}
}
-
printf("Running Asynchronous Switching Tests...\n");
pid = fork();
if (!pid)
- return run_tests(60);
+ return run_tests(runtime);
while (pid != waitpid(pid, &status, WNOHANG))
for (i = 0; i < count; i++)
@@ -162,7 +183,9 @@ int main(int argv, char **argc)
out:
change_clocksource(orig_clk);
- if (status)
- return ksft_exit_fail();
- return ksft_exit_pass();
+	/* Print at the end to avoid mixing output with the child process */
+ ksft_print_header();
+ ksft_set_plan(1);
+ ksft_test_result(!status, "clocksource-switch\n");
+ ksft_exit(!status);
}
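
The run_tests() change above stops treating system()'s raw status as an exit code; the status has to be decoded with the wait macros. A reduced sketch of the pattern the hunk adopts:

#include <stdlib.h>
#include <sys/wait.h>

/* Decode system()'s return value into the child's exit code. */
static int run_cmd(const char *cmd)
{
	int ret = system(cmd);

	if (ret == -1)		/* the shell could not be spawned */
		return -1;
	if (WIFEXITED(ret))	/* normal termination */
		return WEXITSTATUS(ret);
	return -1;		/* killed by a signal */
}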
diff --git a/tools/testing/selftests/timers/inconsistency-check.c b/tools/testing/selftests/timers/inconsistency-check.c
index e6756d9c60a7..36a49fba6c9b 100644
--- a/tools/testing/selftests/timers/inconsistency-check.c
+++ b/tools/testing/selftests/timers/inconsistency-check.c
@@ -122,30 +122,28 @@ int consistency_test(int clock_type, unsigned long seconds)
if (inconsistent >= 0) {
unsigned long long delta;
- printf("\%s\n", start_str);
+ ksft_print_msg("\%s\n", start_str);
for (i = 0; i < CALLS_PER_LOOP; i++) {
if (i == inconsistent)
- printf("--------------------\n");
- printf("%lu:%lu\n", list[i].tv_sec,
+ ksft_print_msg("--------------------\n");
+ ksft_print_msg("%lu:%lu\n", list[i].tv_sec,
list[i].tv_nsec);
if (i == inconsistent + 1)
- printf("--------------------\n");
+ ksft_print_msg("--------------------\n");
}
delta = list[inconsistent].tv_sec * NSEC_PER_SEC;
delta += list[inconsistent].tv_nsec;
delta -= list[inconsistent+1].tv_sec * NSEC_PER_SEC;
delta -= list[inconsistent+1].tv_nsec;
- printf("Delta: %llu ns\n", delta);
+ ksft_print_msg("Delta: %llu ns\n", delta);
fflush(0);
/* timestamp inconsistency*/
t = time(0);
- printf("%s\n", ctime(&t));
- printf("[FAILED]\n");
+ ksft_print_msg("%s\n", ctime(&t));
return -1;
}
now = list[0].tv_sec;
}
- printf("[OK]\n");
return 0;
}
@@ -178,16 +176,22 @@ int main(int argc, char *argv[])
setbuf(stdout, NULL);
+ ksft_print_header();
+ ksft_set_plan(maxclocks - userclock);
+
for (clockid = userclock; clockid < maxclocks; clockid++) {
- if (clockid == CLOCK_HWSPECIFIC)
+ if (clockid == CLOCK_HWSPECIFIC || clock_gettime(clockid, &ts)) {
+ ksft_test_result_skip("%-31s\n", clockstring(clockid));
continue;
+ }
- if (!clock_gettime(clockid, &ts)) {
- printf("Consistent %-30s ", clockstring(clockid));
- if (consistency_test(clockid, runtime))
- return ksft_exit_fail();
+ if (consistency_test(clockid, runtime)) {
+ ksft_test_result_fail("%-31s\n", clockstring(clockid));
+ ksft_exit_fail();
+ } else {
+ ksft_test_result_pass("%-31s\n", clockstring(clockid));
}
}
- return ksft_exit_pass();
+ ksft_exit_pass();
}
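
The hunks above convert inconsistency-check to the kselftest TAP helpers. The general shape of that reporting API (from the in-tree tools/testing/selftests/kselftest.h) is roughly:

#include "../kselftest.h"

int main(void)
{
	ksft_print_header();
	ksft_set_plan(2);			/* declare the number of results up front */

	ksft_test_result_pass("check one\n");	/* or _fail / _skip per result */
	ksft_test_result_skip("check two\n");

	ksft_exit_pass();			/* prints the TAP summary and exits */
}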
diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
index 71b5441c2fd9..df1d03516e7b 100644
--- a/tools/testing/selftests/timers/nanosleep.c
+++ b/tools/testing/selftests/timers/nanosleep.c
@@ -133,33 +133,37 @@ int main(int argc, char **argv)
long long length;
int clockid, ret;
+ ksft_print_header();
+ ksft_set_plan(NR_CLOCKIDS);
+
for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) {
/* Skip cputime clockids since nanosleep won't increment cputime */
if (clockid == CLOCK_PROCESS_CPUTIME_ID ||
clockid == CLOCK_THREAD_CPUTIME_ID ||
- clockid == CLOCK_HWSPECIFIC)
+ clockid == CLOCK_HWSPECIFIC) {
+ ksft_test_result_skip("%-31s\n", clockstring(clockid));
continue;
+ }
- printf("Nanosleep %-31s ", clockstring(clockid));
fflush(stdout);
length = 10;
while (length <= (NSEC_PER_SEC * 10)) {
ret = nanosleep_test(clockid, length);
if (ret == UNSUPPORTED) {
- printf("[UNSUPPORTED]\n");
+ ksft_test_result_skip("%-31s\n", clockstring(clockid));
goto next;
}
if (ret < 0) {
- printf("[FAILED]\n");
- return ksft_exit_fail();
+ ksft_test_result_fail("%-31s\n", clockstring(clockid));
+ ksft_exit_fail();
}
length *= 100;
}
- printf("[OK]\n");
+ ksft_test_result_pass("%-31s\n", clockstring(clockid));
next:
ret = 0;
}
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index b41d8dd0c40c..5beceeed0d11 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -89,7 +89,7 @@ void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw)
}
}
-int main(int argv, char **argc)
+int main(int argc, char **argv)
{
struct timespec mon, raw, start, end;
long long delta1, delta2, interval, eppm, ppm;
diff --git a/tools/testing/selftests/timers/skew_consistency.c b/tools/testing/selftests/timers/skew_consistency.c
index 8066be9aff11..63913f75b384 100644
--- a/tools/testing/selftests/timers/skew_consistency.c
+++ b/tools/testing/selftests/timers/skew_consistency.c
@@ -38,7 +38,7 @@
#define NSEC_PER_SEC 1000000000LL
-int main(int argv, char **argc)
+int main(int argc, char **argv)
{
struct timex tx;
int ret, ppm;
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
index 5397de708d3c..48b9a803235a 100644
--- a/tools/testing/selftests/timers/valid-adjtimex.c
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -40,7 +40,7 @@
#define ADJ_SETOFFSET 0x0100
#include <sys/syscall.h>
-static int clock_adjtime(clockid_t id, struct timex *tx)
+int clock_adjtime(clockid_t id, struct timex *tx)
{
return syscall(__NR_clock_adjtime, id, tx);
}
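
Dropping `static` above keeps the raw-syscall wrapper from clashing with the glibc clock_adjtime() declaration. A sketch of using that syscall path directly (read-only query, no offset applied):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/timex.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* modes = 0: just read clock state */

	/* Returns the clock state (TIME_OK etc.) on success, -1 on error. */
	return syscall(__NR_clock_adjtime, CLOCK_REALTIME, &tx) < 0;
}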
diff --git a/tools/testing/selftests/tpm2/settings b/tools/testing/selftests/tpm2/settings
new file mode 100644
index 000000000000..a62d2fa1275c
--- /dev/null
+++ b/tools/testing/selftests/tpm2/settings
@@ -0,0 +1 @@
+timeout=600
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index d7507f3c7c76..31e5eea2a9b9 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -9,7 +9,9 @@ map_hugetlb
map_populate
thuge-gen
compaction_test
+migration
mlock2-tests
+mrelease_test
mremap_dontunmap
mremap_test
on-fault-limit
@@ -29,5 +31,6 @@ write_to_hugetlbfs
hmm-tests
memfd_secret
local_config.*
+soft-dirty
split_huge_page_test
ksm_tests
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 5b1ecd00695b..108587cb327a 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -36,20 +36,23 @@ TEST_GEN_FILES += hugepage-mremap
TEST_GEN_FILES += hugepage-shm
TEST_GEN_FILES += hugepage-vmemmap
TEST_GEN_FILES += khugepaged
-TEST_GEN_FILES += madv_populate
+TEST_GEN_PROGS = madv_populate
TEST_GEN_FILES += map_fixed_noreplace
TEST_GEN_FILES += map_hugetlb
TEST_GEN_FILES += map_populate
TEST_GEN_FILES += memfd_secret
+TEST_GEN_FILES += migration
TEST_GEN_FILES += mlock-random-test
TEST_GEN_FILES += mlock2-tests
+TEST_GEN_FILES += mrelease_test
TEST_GEN_FILES += mremap_dontunmap
TEST_GEN_FILES += mremap_test
TEST_GEN_FILES += on-fault-limit
TEST_GEN_FILES += thuge-gen
TEST_GEN_FILES += transhuge-stress
TEST_GEN_FILES += userfaultfd
-TEST_GEN_FILES += split_huge_page_test
+TEST_GEN_PROGS += soft-dirty
+TEST_GEN_PROGS += split_huge_page_test
TEST_GEN_FILES += ksm_tests
ifeq ($(MACHINE),x86_64)
@@ -89,10 +92,14 @@ endif
TEST_PROGS := run_vmtests.sh
TEST_FILES := test_vmalloc.sh
+TEST_FILES += test_hmm.sh
-KSFT_KHDR_INSTALL := 1
include ../lib.mk
+$(OUTPUT)/madv_populate: vm_util.c
+$(OUTPUT)/soft-dirty: vm_util.c
+$(OUTPUT)/split_huge_page_test: vm_util.c
+
ifeq ($(MACHINE),x86_64)
BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
@@ -149,6 +156,8 @@ $(OUTPUT)/hmm-tests: LDLIBS += $(HMM_EXTRA_LIBS)
$(OUTPUT)/ksm_tests: LDLIBS += -lnuma
+$(OUTPUT)/migration: LDLIBS += -lnuma
+
local_config.mk local_config.h: check_config.sh
/bin/sh ./check_config.sh $(CC)
diff --git a/tools/testing/selftests/vm/config b/tools/testing/selftests/vm/config
index 60e82da0de85..be087c4bc396 100644
--- a/tools/testing/selftests/vm/config
+++ b/tools/testing/selftests/vm/config
@@ -4,3 +4,5 @@ CONFIG_TEST_VMALLOC=m
CONFIG_DEVICE_PRIVATE=y
CONFIG_TEST_HMM=m
CONFIG_GUP_TEST=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_MEM_SOFT_DIRTY=y
diff --git a/tools/testing/selftests/vm/gup_test.c b/tools/testing/selftests/vm/gup_test.c
index cda837a14736..a309876d832f 100644
--- a/tools/testing/selftests/vm/gup_test.c
+++ b/tools/testing/selftests/vm/gup_test.c
@@ -1,7 +1,9 @@
#include <fcntl.h>
+#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
+#include <dirent.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -9,6 +11,7 @@
#include <pthread.h>
#include <assert.h>
#include "../../../../mm/gup_test.h"
+#include "../kselftest.h"
#include "util.h"
@@ -18,6 +21,8 @@
#define FOLL_WRITE 0x01 /* check pte is writable */
#define FOLL_TOUCH 0x02 /* mark page accessed */
+#define GUP_TEST_FILE "/sys/kernel/debug/gup_test"
+
static unsigned long cmd = GUP_FAST_BENCHMARK;
static int gup_fd, repeats = 1;
static unsigned long size = 128 * MB;
@@ -204,10 +209,25 @@ int main(int argc, char **argv)
if (write)
gup.gup_flags |= FOLL_WRITE;
- gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
+ gup_fd = open(GUP_TEST_FILE, O_RDWR);
if (gup_fd == -1) {
- perror("open");
- exit(1);
+ switch (errno) {
+ case EACCES:
+ if (getuid())
+ printf("Please run this test as root\n");
+ break;
+ case ENOENT:
+ if (opendir("/sys/kernel/debug") == NULL) {
+ printf("mount debugfs at /sys/kernel/debug\n");
+ break;
+ }
+ printf("check if CONFIG_GUP_TEST is enabled in kernel config\n");
+ break;
+ default:
+ perror("failed to open " GUP_TEST_FILE);
+ break;
+ }
+ exit(KSFT_SKIP);
}
p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
diff --git a/tools/testing/selftests/vm/hugepage-mremap.c b/tools/testing/selftests/vm/hugepage-mremap.c
index 1d689084a54b..585978f181ed 100644
--- a/tools/testing/selftests/vm/hugepage-mremap.c
+++ b/tools/testing/selftests/vm/hugepage-mremap.c
@@ -178,6 +178,12 @@ int main(int argc, char *argv[])
munmap(addr, length);
+ addr = mremap(addr, length, length, 0);
+ if (addr != MAP_FAILED) {
+ printf("mremap: Expected failure, but call succeeded\n");
+ exit(1);
+ }
+
close(fd);
unlink(argv[argc-1]);
diff --git a/tools/testing/selftests/vm/ksm_tests.c b/tools/testing/selftests/vm/ksm_tests.c
index fd85f15869d1..f5e4e0bbd081 100644
--- a/tools/testing/selftests/vm/ksm_tests.c
+++ b/tools/testing/selftests/vm/ksm_tests.c
@@ -54,6 +54,7 @@ static int ksm_write_sysfs(const char *file_path, unsigned long val)
}
if (fprintf(f, "%lu", val) < 0) {
perror("fprintf");
+ fclose(f);
return 1;
}
fclose(f);
@@ -72,6 +73,7 @@ static int ksm_read_sysfs(const char *file_path, unsigned long *val)
}
if (fscanf(f, "%lu", val) != 1) {
perror("fscanf");
+ fclose(f);
return 1;
}
fclose(f);
@@ -221,7 +223,8 @@ static bool assert_ksm_pages_count(long dupl_page_count)
static int ksm_save_def(struct ksm_sysfs *ksm_sysfs)
{
if (ksm_read_sysfs(KSM_FP("max_page_sharing"), &ksm_sysfs->max_page_sharing) ||
- ksm_read_sysfs(KSM_FP("merge_across_nodes"), &ksm_sysfs->merge_across_nodes) ||
+	    (numa_available() ? 0 :
+	    ksm_read_sysfs(KSM_FP("merge_across_nodes"), &ksm_sysfs->merge_across_nodes)) ||
ksm_read_sysfs(KSM_FP("sleep_millisecs"), &ksm_sysfs->sleep_millisecs) ||
ksm_read_sysfs(KSM_FP("pages_to_scan"), &ksm_sysfs->pages_to_scan) ||
ksm_read_sysfs(KSM_FP("run"), &ksm_sysfs->run) ||
@@ -236,7 +239,8 @@ static int ksm_save_def(struct ksm_sysfs *ksm_sysfs)
static int ksm_restore(struct ksm_sysfs *ksm_sysfs)
{
if (ksm_write_sysfs(KSM_FP("max_page_sharing"), ksm_sysfs->max_page_sharing) ||
- ksm_write_sysfs(KSM_FP("merge_across_nodes"), ksm_sysfs->merge_across_nodes) ||
+	    (numa_available() ? 0 :
+	    ksm_write_sysfs(KSM_FP("merge_across_nodes"), ksm_sysfs->merge_across_nodes)) ||
ksm_write_sysfs(KSM_FP("pages_to_scan"), ksm_sysfs->pages_to_scan) ||
ksm_write_sysfs(KSM_FP("run"), ksm_sysfs->run) ||
ksm_write_sysfs(KSM_FP("sleep_millisecs"), ksm_sysfs->sleep_millisecs) ||
@@ -720,7 +724,8 @@ int main(int argc, char *argv[])
if (ksm_write_sysfs(KSM_FP("run"), 2) ||
ksm_write_sysfs(KSM_FP("sleep_millisecs"), 0) ||
- ksm_write_sysfs(KSM_FP("merge_across_nodes"), 1) ||
+	    (numa_available() ? 0 :
+	    ksm_write_sysfs(KSM_FP("merge_across_nodes"), 1)) ||
ksm_write_sysfs(KSM_FP("pages_to_scan"), page_count))
return KSFT_FAIL;
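
A note on the numa_available() guards above: the ternary must be parenthesized because `?:` binds more loosely than `||`, so an unparenthesized guard would swallow the rest of the condition chain. A reduced illustration:

static void precedence_demo(void)
{
	int a = 1, b = 0, c = 1;
	int wrong = a || b ? 0 : c;	/* parses as (a || b) ? 0 : c -> 0 */
	int right = a || (b ? 0 : c);	/* guard stays a single operand -> 1 */

	(void)wrong; (void)right;
}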
diff --git a/tools/testing/selftests/vm/madv_populate.c b/tools/testing/selftests/vm/madv_populate.c
index 3ee0e8275600..715a42e8e2cd 100644
--- a/tools/testing/selftests/vm/madv_populate.c
+++ b/tools/testing/selftests/vm/madv_populate.c
@@ -18,6 +18,7 @@
#include <sys/mman.h>
#include "../kselftest.h"
+#include "vm_util.h"
/*
* For now, we're using 2 MiB of private anonymous memory for all tests.
@@ -26,18 +27,6 @@
static size_t pagesize;
-static uint64_t pagemap_get_entry(int fd, char *start)
-{
- const unsigned long pfn = (unsigned long)start / pagesize;
- uint64_t entry;
- int ret;
-
- ret = pread(fd, &entry, sizeof(entry), pfn * sizeof(entry));
- if (ret != sizeof(entry))
- ksft_exit_fail_msg("reading pagemap failed\n");
- return entry;
-}
-
static bool pagemap_is_populated(int fd, char *start)
{
uint64_t entry = pagemap_get_entry(fd, start);
@@ -46,13 +35,6 @@ static bool pagemap_is_populated(int fd, char *start)
return entry & 0xc000000000000000ull;
}
-static bool pagemap_is_softdirty(int fd, char *start)
-{
- uint64_t entry = pagemap_get_entry(fd, start);
-
- return entry & 0x0080000000000000ull;
-}
-
static void sense_support(void)
{
char *addr;
@@ -258,20 +240,6 @@ static bool range_is_not_softdirty(char *start, ssize_t size)
return ret;
}
-static void clear_softdirty(void)
-{
- int fd = open("/proc/self/clear_refs", O_WRONLY);
- const char *ctrl = "4";
- int ret;
-
- if (fd < 0)
- ksft_exit_fail_msg("opening clear_refs failed\n");
- ret = write(fd, ctrl, strlen(ctrl));
- if (ret != strlen(ctrl))
- ksft_exit_fail_msg("writing clear_refs failed\n");
- close(fd);
-}
-
static void test_softdirty(void)
{
char *addr;
diff --git a/tools/testing/selftests/vm/migration.c b/tools/testing/selftests/vm/migration.c
new file mode 100644
index 000000000000..1cec8425e3ca
--- /dev/null
+++ b/tools/testing/selftests/vm/migration.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The main purpose of the tests here is to exercise the migration entry code
+ * paths in the kernel.
+ */
+
+#include "../kselftest_harness.h"
+#include <strings.h>
+#include <pthread.h>
+#include <numa.h>
+#include <numaif.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <time.h>
+
+#define TWOMEG (2<<20)
+#define RUNTIME (60)
+
+#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
+
+FIXTURE(migration)
+{
+ pthread_t *threads;
+ pid_t *pids;
+ int nthreads;
+ int n1;
+ int n2;
+};
+
+FIXTURE_SETUP(migration)
+{
+ int n;
+
+ ASSERT_EQ(numa_available(), 0);
+ self->nthreads = numa_num_task_cpus() - 1;
+ self->n1 = -1;
+ self->n2 = -1;
+
+ for (n = 0; n < numa_max_possible_node(); n++)
+ if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
+ if (self->n1 == -1) {
+ self->n1 = n;
+ } else {
+ self->n2 = n;
+ break;
+ }
+ }
+
+ self->threads = malloc(self->nthreads * sizeof(*self->threads));
+ ASSERT_NE(self->threads, NULL);
+ self->pids = malloc(self->nthreads * sizeof(*self->pids));
+ ASSERT_NE(self->pids, NULL);
+};
+
+FIXTURE_TEARDOWN(migration)
+{
+ free(self->threads);
+ free(self->pids);
+}
+
+int migrate(uint64_t *ptr, int n1, int n2)
+{
+ int ret, tmp;
+ int status = 0;
+ struct timespec ts1, ts2;
+
+ if (clock_gettime(CLOCK_MONOTONIC, &ts1))
+ return -1;
+
+ while (1) {
+ if (clock_gettime(CLOCK_MONOTONIC, &ts2))
+ return -1;
+
+ if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
+ return 0;
+
+ ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
+ MPOL_MF_MOVE_ALL);
+ if (ret) {
+ if (ret > 0)
+ printf("Didn't migrate %d pages\n", ret);
+ else
+ perror("Couldn't migrate pages");
+ return -2;
+ }
+
+ tmp = n2;
+ n2 = n1;
+ n1 = tmp;
+ }
+
+ return 0;
+}
+
+void *access_mem(void *ptr)
+{
+ uint64_t y = 0;
+ volatile uint64_t *x = ptr;
+
+ while (1) {
+ pthread_testcancel();
+ y += *x;
+ }
+
+ return NULL;
+}
+
+/*
+ * Basic migration entry testing. One thread will move pages back and forth
+ * between nodes while other threads try to access them, triggering the
+ * migration entry wait paths in the kernel.
+ */
+TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
+{
+ uint64_t *ptr;
+ int i;
+
+ if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
+ SKIP(return, "Not enough threads or NUMA nodes available");
+
+ ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ memset(ptr, 0xde, TWOMEG);
+ for (i = 0; i < self->nthreads - 1; i++)
+ if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
+ perror("Couldn't create thread");
+
+ ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
+ for (i = 0; i < self->nthreads - 1; i++)
+ ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
+}
+
+/*
+ * Same as the previous test but with shared memory.
+ */
+TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
+{
+ pid_t pid;
+ uint64_t *ptr;
+ int i;
+
+ if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
+ SKIP(return, "Not enough threads or NUMA nodes available");
+
+ ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ memset(ptr, 0xde, TWOMEG);
+ for (i = 0; i < self->nthreads - 1; i++) {
+ pid = fork();
+ if (!pid)
+ access_mem(ptr);
+ else
+ self->pids[i] = pid;
+ }
+
+ ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
+ for (i = 0; i < self->nthreads - 1; i++)
+ ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
+}
+
+/*
+ * Tests the pmd migration entry paths.
+ */
+TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
+{
+ uint64_t *ptr;
+ int i;
+
+ if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
+ SKIP(return, "Not enough threads or NUMA nodes available");
+
+ ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ ASSERT_NE(ptr, MAP_FAILED);
+
+ ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
+ ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
+ memset(ptr, 0xde, TWOMEG);
+ for (i = 0; i < self->nthreads - 1; i++)
+ if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
+ perror("Couldn't create thread");
+
+ ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
+ for (i = 0; i < self->nthreads - 1; i++)
+ ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
+}
+
+TEST_HARNESS_MAIN
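
The fixture above drives move_pages(2) from libnuma's <numaif.h>; a minimal sketch of migrating a single page of the calling process (link with -lnuma):

#include <numaif.h>
#include <stdio.h>

/* Ask the kernel to move one page of the calling process to 'node'. */
static int move_one_page(void *page, int node)
{
	int status = 0;

	if (move_pages(0 /* self */, 1, &page, &node, &status,
		       MPOL_MF_MOVE_ALL) < 0) {
		perror("move_pages");
		return -1;
	}
	/* On success, status holds the node the page now resides on
	 * (or a negative errno for that page). */
	printf("page on node %d\n", status);
	return 0;
}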
diff --git a/tools/testing/selftests/vm/mrelease_test.c b/tools/testing/selftests/vm/mrelease_test.c
new file mode 100644
index 000000000000..96671c2f7d48
--- /dev/null
+++ b/tools/testing/selftests/vm/mrelease_test.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 Google LLC
+ */
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "util.h"
+
+#include "../kselftest.h"
+
+#ifndef __NR_pidfd_open
+#define __NR_pidfd_open -1
+#endif
+
+#ifndef __NR_process_mrelease
+#define __NR_process_mrelease -1
+#endif
+
+#define MB(x) (x << 20)
+#define MAX_SIZE_MB 1024
+
+static int alloc_noexit(unsigned long nr_pages, int pipefd)
+{
+ int ppid = getppid();
+ int timeout = 10; /* 10sec timeout to get killed */
+ unsigned long i;
+ char *buf;
+
+ buf = (char *)mmap(NULL, nr_pages * PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, 0, 0);
+ if (buf == MAP_FAILED) {
+ perror("mmap failed, halting the test");
+ return KSFT_FAIL;
+ }
+
+ for (i = 0; i < nr_pages; i++)
+ *((unsigned long *)(buf + (i * PAGE_SIZE))) = i;
+
+ /* Signal the parent that the child is ready */
+ if (write(pipefd, "", 1) < 0) {
+ perror("write");
+ return KSFT_FAIL;
+ }
+
+ /* Wait to be killed (when reparenting happens) */
+ while (getppid() == ppid && timeout > 0) {
+ sleep(1);
+ timeout--;
+ }
+
+ munmap(buf, nr_pages * PAGE_SIZE);
+
+ return (timeout > 0) ? KSFT_PASS : KSFT_FAIL;
+}
+
+/* The process_mrelease calls in this test are expected to fail */
+static void run_negative_tests(int pidfd)
+{
+ /* Test invalid flags. Expect to fail with EINVAL error code. */
+ if (!syscall(__NR_process_mrelease, pidfd, (unsigned int)-1) ||
+ errno != EINVAL) {
+ perror("process_mrelease with wrong flags");
+ exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ }
+ /*
+ * Test reaping while process is alive with no pending SIGKILL.
+ * Expect to fail with EINVAL error code.
+ */
+ if (!syscall(__NR_process_mrelease, pidfd, 0) || errno != EINVAL) {
+ perror("process_mrelease on a live process");
+ exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ }
+}
+
+static int child_main(int pipefd[], size_t size)
+{
+ int res;
+
+ /* Allocate and fault-in memory and wait to be killed */
+ close(pipefd[0]);
+ res = alloc_noexit(MB(size) / PAGE_SIZE, pipefd[1]);
+ close(pipefd[1]);
+ return res;
+}
+
+int main(void)
+{
+ int pipefd[2], pidfd;
+ bool success, retry;
+ size_t size;
+ pid_t pid;
+ char byte;
+ int res;
+
+ /* Test a wrong pidfd */
+ if (!syscall(__NR_process_mrelease, -1, 0) || errno != EBADF) {
+ perror("process_mrelease with wrong pidfd");
+ exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ }
+
+ /* Start the test with 1MB child memory allocation */
+ size = 1;
+retry:
+ /*
+ * Pipe for the child to signal when it's done allocating
+ * memory
+ */
+ if (pipe(pipefd)) {
+ perror("pipe");
+ exit(KSFT_FAIL);
+ }
+ pid = fork();
+ if (pid < 0) {
+ perror("fork");
+ close(pipefd[0]);
+ close(pipefd[1]);
+ exit(KSFT_FAIL);
+ }
+
+ if (pid == 0) {
+ /* Child main routine */
+ res = child_main(pipefd, size);
+ exit(res);
+ }
+
+ /*
+ * Parent main routine:
+ * Wait for the child to finish allocations, then kill and reap
+ */
+ close(pipefd[1]);
+ /* Block until the child is ready */
+ res = read(pipefd[0], &byte, 1);
+ close(pipefd[0]);
+ if (res < 0) {
+ perror("read");
+ if (!kill(pid, SIGKILL))
+ waitpid(pid, NULL, 0);
+ exit(KSFT_FAIL);
+ }
+
+ pidfd = syscall(__NR_pidfd_open, pid, 0);
+ if (pidfd < 0) {
+ perror("pidfd_open");
+ if (!kill(pid, SIGKILL))
+ waitpid(pid, NULL, 0);
+ exit(KSFT_FAIL);
+ }
+
+ /* Run negative tests which require a live child */
+ run_negative_tests(pidfd);
+
+ if (kill(pid, SIGKILL)) {
+ perror("kill");
+ exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ }
+
+ success = (syscall(__NR_process_mrelease, pidfd, 0) == 0);
+ if (!success) {
+ /*
+		 * The child may have exited too soon, before we could call
+		 * process_mrelease. Double the child's memory, which makes it
+		 * spend more time on cleanup and increases our chances of
+		 * reaping its memory before it exits.
+		 * Retry until we succeed or reach MAX_SIZE_MB.
+ */
+ if (errno == ESRCH) {
+ retry = (size <= MAX_SIZE_MB);
+ } else {
+ perror("process_mrelease");
+ waitpid(pid, NULL, 0);
+ exit(errno == ENOSYS ? KSFT_SKIP : KSFT_FAIL);
+ }
+ }
+
+ /* Cleanup to prevent zombies */
+ if (waitpid(pid, NULL, 0) < 0) {
+ perror("waitpid");
+ exit(KSFT_FAIL);
+ }
+ close(pidfd);
+
+ if (!success) {
+ if (retry) {
+ size *= 2;
+ goto retry;
+ }
+ printf("All process_mrelease attempts failed!\n");
+ exit(KSFT_FAIL);
+ }
+
+ printf("Success reaping a child with %zuMB of memory allocations\n",
+ size);
+ return KSFT_PASS;
+}
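
The test above invokes pidfd_open(2) and process_mrelease(2) by syscall number since libc wrappers may not exist yet. The core kill-then-reap sequence it exercises looks roughly like this (assumes Linux 5.15+ headers defining both __NR_ constants):

#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

static int kill_and_reap(pid_t pid)
{
	int ret = -1;
	int pidfd = syscall(__NR_pidfd_open, pid, 0);

	if (pidfd < 0)
		return -1;
	/* process_mrelease requires a pending SIGKILL on the target. */
	if (!kill(pid, SIGKILL) &&
	    !syscall(__NR_process_mrelease, pidfd, 0))
		ret = 0;	/* ESRCH here means the child already exited */
	waitpid(pid, NULL, 0);	/* the zombie must still be reaped */
	close(pidfd);
	return ret;
}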
diff --git a/tools/testing/selftests/vm/pkey-x86.h b/tools/testing/selftests/vm/pkey-x86.h
index e4a4ce2b826d..b078ce9c6d2a 100644
--- a/tools/testing/selftests/vm/pkey-x86.h
+++ b/tools/testing/selftests/vm/pkey-x86.h
@@ -80,19 +80,6 @@ static inline void __write_pkey_reg(u64 pkey_reg)
assert(pkey_reg == __read_pkey_reg());
}
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- /* ecx is often an input as well as an output. */
- asm volatile(
- "cpuid;"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (*eax), "2" (*ecx));
-}
-
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx) */
#define X86_FEATURE_PKU (1<<3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (1<<4) /* OS Protection Keys Enable */
@@ -104,9 +91,7 @@ static inline int cpu_has_pkeys(void)
unsigned int ecx;
unsigned int edx;
- eax = 0x7;
- ecx = 0x0;
- __cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid_count(0x7, 0x0, eax, ebx, ecx, edx);
if (!(ecx & X86_FEATURE_PKU)) {
dprintf2("cpu does not have PKU\n");
@@ -142,9 +127,7 @@ int pkey_reg_xstate_offset(void)
/* assume that XSTATE_PKEY is set in XCR0 */
leaf = XSTATE_PKEY_BIT;
{
- eax = XSTATE_CPUID;
- ecx = leaf;
- __cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid_count(XSTATE_CPUID, leaf, eax, ebx, ecx, edx);
if (leaf == XSTATE_PKEY_BIT) {
xstate_offset = ebx;
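
__cpuid_count() above is the macro shipped in GCC/clang's <cpuid.h> (presumably how the selftest picks it up), taking the leaf and subleaf by value and the output registers by name. A standalone sketch of the PKU probe:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 7, subleaf 0: ECX bit 3 advertises PKU. */
	__cpuid_count(0x7, 0x0, eax, ebx, ecx, edx);
	printf("PKU: %s\n", (ecx & (1u << 3)) ? "yes" : "no");
	return 0;
}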
diff --git a/tools/testing/selftests/vm/protection_keys.c b/tools/testing/selftests/vm/protection_keys.c
index 2d0ae88665db..291bc1e07842 100644
--- a/tools/testing/selftests/vm/protection_keys.c
+++ b/tools/testing/selftests/vm/protection_keys.c
@@ -1523,7 +1523,7 @@ void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
/*
* Reset the shadow, assuming that the above mprotect()
* correctly changed PKRU, but to an unknown value since
- * the actual alllocated pkey is unknown.
+ * the actual allocated pkey is unknown.
*/
shadow_pkey_reg = __read_pkey_reg();
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index 352ba00cf26b..41fce8bea929 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -9,12 +9,12 @@ mnt=./huge
exitcode=0
#get huge pagesize and freepages from /proc/meminfo
-while read name size unit; do
+while read -r name size unit; do
if [ "$name" = "HugePages_Free:" ]; then
- freepgs=$size
+ freepgs="$size"
fi
if [ "$name" = "Hugepagesize:" ]; then
- hpgsize_KB=$size
+ hpgsize_KB="$size"
fi
done < /proc/meminfo
@@ -30,27 +30,26 @@ needmem_KB=$((half_ufd_size_MB * 2 * 1024))
#set proper nr_hugepages
if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
- nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
+ nr_hugepgs=$(cat /proc/sys/vm/nr_hugepages)
needpgs=$((needmem_KB / hpgsize_KB))
tries=2
- while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
- lackpgs=$(( $needpgs - $freepgs ))
+ while [ "$tries" -gt 0 ] && [ "$freepgs" -lt "$needpgs" ]; do
+ lackpgs=$((needpgs - freepgs))
echo 3 > /proc/sys/vm/drop_caches
- echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
- if [ $? -ne 0 ]; then
+ if ! echo $((lackpgs + nr_hugepgs)) > /proc/sys/vm/nr_hugepages; then
echo "Please run this test as root"
exit $ksft_skip
fi
- while read name size unit; do
+ while read -r name size unit; do
if [ "$name" = "HugePages_Free:" ]; then
freepgs=$size
fi
done < /proc/meminfo
tries=$((tries - 1))
done
- if [ $freepgs -lt $needpgs ]; then
+ if [ "$freepgs" -lt "$needpgs" ]; then
printf "Not enough huge pages available (%d < %d)\n" \
- $freepgs $needpgs
+ "$freepgs" "$needpgs"
exit 1
fi
else
@@ -60,458 +59,124 @@ fi
#filter 64bit architectures
ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
-if [ -z $ARCH ]; then
- ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
+if [ -z "$ARCH" ]; then
+ ARCH=$(uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/')
fi
VADDR64=0
-echo "$ARCH64STR" | grep $ARCH && VADDR64=1
-
-mkdir $mnt
-mount -t hugetlbfs none $mnt
-
-echo "---------------------"
-echo "running hugepage-mmap"
-echo "---------------------"
-./hugepage-mmap
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+echo "$ARCH64STR" | grep "$ARCH" && VADDR64=1
+
+# Usage: run_test [test binary] [arbitrary test arguments...]
+run_test() {
+ local title="running $*"
+ local sep=$(echo -n "$title" | tr "[:graph:][:space:]" -)
+ printf "%s\n%s\n%s\n" "$sep" "$title" "$sep"
+
+ "$@"
+ local ret=$?
+ if [ $ret -eq 0 ]; then
+ echo "[PASS]"
+ elif [ $ret -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+ else
+ echo "[FAIL]"
+ exitcode=1
+ fi
+}
-shmmax=`cat /proc/sys/kernel/shmmax`
-shmall=`cat /proc/sys/kernel/shmall`
+mkdir "$mnt"
+mount -t hugetlbfs none "$mnt"
+
+run_test ./hugepage-mmap
+
+shmmax=$(cat /proc/sys/kernel/shmmax)
+shmall=$(cat /proc/sys/kernel/shmall)
echo 268435456 > /proc/sys/kernel/shmmax
echo 4194304 > /proc/sys/kernel/shmall
-echo "--------------------"
-echo "running hugepage-shm"
-echo "--------------------"
-./hugepage-shm
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-echo $shmmax > /proc/sys/kernel/shmmax
-echo $shmall > /proc/sys/kernel/shmall
-
-echo "-------------------"
-echo "running map_hugetlb"
-echo "-------------------"
-./map_hugetlb
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+run_test ./hugepage-shm
+echo "$shmmax" > /proc/sys/kernel/shmmax
+echo "$shmall" > /proc/sys/kernel/shmall
-echo "-----------------------"
-echo "running hugepage-mremap"
-echo "-----------------------"
-./hugepage-mremap $mnt/huge_mremap
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-rm -f $mnt/huge_mremap
-
-echo "------------------------"
-echo "running hugepage-vmemmap"
-echo "------------------------"
-./hugepage-vmemmap
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+run_test ./map_hugetlb
-echo "-----------------------"
-echo "running hugetlb-madvise"
-echo "-----------------------"
-./hugetlb-madvise $mnt/madvise-test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-rm -f $mnt/madvise-test
+run_test ./hugepage-mremap "$mnt"/huge_mremap
+rm -f "$mnt"/huge_mremap
+
+run_test ./hugepage-vmemmap
+
+run_test ./hugetlb-madvise "$mnt"/madvise-test
+rm -f "$mnt"/madvise-test
echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
echo " hugetlb regression testing."
-echo "---------------------------"
-echo "running map_fixed_noreplace"
-echo "---------------------------"
-./map_fixed_noreplace
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+run_test ./map_fixed_noreplace
-echo "------------------------------------------------------"
-echo "running: gup_test -u # get_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -u
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+# get_user_pages_fast() benchmark
+run_test ./gup_test -u
+# pin_user_pages_fast() benchmark
+run_test ./gup_test -a
+# Dump pages 0, 19, and 4096, using pin_user_pages:
+run_test ./gup_test -ct -F 0x1 0 19 0x1000
-echo "------------------------------------------------------"
-echo "running: gup_test -a # pin_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -a
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "------------------------------------------------------------"
-echo "# Dump pages 0, 19, and 4096, using pin_user_pages:"
-echo "running: gup_test -ct -F 0x1 0 19 0x1000 # dump_page() test"
-echo "------------------------------------------------------------"
-./gup_test -ct -F 0x1 0 19 0x1000
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "-------------------"
-echo "running userfaultfd"
-echo "-------------------"
-./userfaultfd anon 20 16
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "---------------------------"
-echo "running userfaultfd_hugetlb"
-echo "---------------------------"
+run_test ./userfaultfd anon 20 16
# Test requires source and destination huge pages. Size of source
# (half_ufd_size_MB) is passed as argument to test.
-./userfaultfd hugetlb $half_ufd_size_MB 32
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "-------------------------"
-echo "running userfaultfd_shmem"
-echo "-------------------------"
-./userfaultfd shmem 20 16
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32
+run_test ./userfaultfd shmem 20 16
#cleanup
-umount $mnt
-rm -rf $mnt
-echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
-
-echo "-----------------------"
-echo "running compaction_test"
-echo "-----------------------"
-./compaction_test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "----------------------"
-echo "running on-fault-limit"
-echo "----------------------"
-sudo -u nobody ./on-fault-limit
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "--------------------"
-echo "running map_populate"
-echo "--------------------"
-./map_populate
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+umount "$mnt"
+rm -rf "$mnt"
+echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages
-echo "-------------------------"
-echo "running mlock-random-test"
-echo "-------------------------"
-./mlock-random-test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+run_test ./compaction_test
-echo "--------------------"
-echo "running mlock2-tests"
-echo "--------------------"
-./mlock2-tests
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+run_test sudo -u nobody ./on-fault-limit
-echo "-------------------"
-echo "running mremap_test"
-echo "-------------------"
-./mremap_test
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
-
-echo "-----------------"
-echo "running thuge-gen"
-echo "-----------------"
-./thuge-gen
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-if [ $VADDR64 -ne 0 ]; then
-echo "-----------------------------"
-echo "running virtual_address_range"
-echo "-----------------------------"
-./virtual_address_range
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
+run_test ./map_populate
-echo "-----------------------------"
-echo "running virtual address 128TB switch test"
-echo "-----------------------------"
-./va_128TBswitch
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-fi # VADDR64
+run_test ./mlock-random-test
-echo "------------------------------------"
-echo "running vmalloc stability smoke test"
-echo "------------------------------------"
-./test_vmalloc.sh smoke
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+run_test ./mlock2-tests
-echo "------------------------------------"
-echo "running MREMAP_DONTUNMAP smoke test"
-echo "------------------------------------"
-./mremap_dontunmap
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+run_test ./mrelease_test
-echo "running HMM smoke test"
-echo "------------------------------------"
-./test_hmm.sh smoke
-ret_val=$?
+run_test ./mremap_test
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+run_test ./thuge-gen
-echo "--------------------------------------------------------"
-echo "running MADV_POPULATE_READ and MADV_POPULATE_WRITE tests"
-echo "--------------------------------------------------------"
-./madv_populate
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+if [ $VADDR64 -ne 0 ]; then
+ run_test ./virtual_address_range
-echo "running memfd_secret test"
-echo "------------------------------------"
-./memfd_secret
-ret_val=$?
+ # virtual address 128TB switch test
+ run_test ./va_128TBswitch
+fi # VADDR64
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+# vmalloc stability smoke test
+run_test ./test_vmalloc.sh smoke
-echo "-------------------------------------------------------"
-echo "running KSM MADV_MERGEABLE test with 10 identical pages"
-echo "-------------------------------------------------------"
-./ksm_tests -M -p 10
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+run_test ./mremap_dontunmap
-echo "------------------------"
-echo "running KSM unmerge test"
-echo "------------------------"
-./ksm_tests -U
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+run_test ./test_hmm.sh smoke
-echo "----------------------------------------------------------"
-echo "running KSM test with 10 zero pages and use_zero_pages = 0"
-echo "----------------------------------------------------------"
-./ksm_tests -Z -p 10 -z 0
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
-
-echo "----------------------------------------------------------"
-echo "running KSM test with 10 zero pages and use_zero_pages = 1"
-echo "----------------------------------------------------------"
-./ksm_tests -Z -p 10 -z 1
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
-
-echo "-------------------------------------------------------------"
-echo "running KSM test with 2 NUMA nodes and merge_across_nodes = 1"
-echo "-------------------------------------------------------------"
-./ksm_tests -N -m 1
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+# MADV_POPULATE_READ and MADV_POPULATE_WRITE tests
+run_test ./madv_populate
-echo "-------------------------------------------------------------"
-echo "running KSM test with 2 NUMA nodes and merge_across_nodes = 0"
-echo "-------------------------------------------------------------"
-./ksm_tests -N -m 0
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
+run_test ./memfd_secret
-exit $exitcode
+# KSM MADV_MERGEABLE test with 10 identical pages
+run_test ./ksm_tests -M -p 10
+# KSM unmerge test
+run_test ./ksm_tests -U
+# KSM test with 10 zero pages and use_zero_pages = 0
+run_test ./ksm_tests -Z -p 10 -z 0
+# KSM test with 10 zero pages and use_zero_pages = 1
+run_test ./ksm_tests -Z -p 10 -z 1
+# KSM test with 2 NUMA nodes and merge_across_nodes = 1
+run_test ./ksm_tests -N -m 1
+# KSM test with 2 NUMA nodes and merge_across_nodes = 0
+run_test ./ksm_tests -N -m 0
exit $exitcode
diff --git a/tools/testing/selftests/vm/settings b/tools/testing/selftests/vm/settings
new file mode 100644
index 000000000000..9abfc60e9e6f
--- /dev/null
+++ b/tools/testing/selftests/vm/settings
@@ -0,0 +1 @@
+timeout=45
diff --git a/tools/testing/selftests/vm/soft-dirty.c b/tools/testing/selftests/vm/soft-dirty.c
new file mode 100644
index 000000000000..08ab62a4a9d0
--- /dev/null
+++ b/tools/testing/selftests/vm/soft-dirty.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <string.h>
+#include <stdbool.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <malloc.h>
+#include <sys/mman.h>
+#include "../kselftest.h"
+#include "vm_util.h"
+
+#define PAGEMAP_FILE_PATH "/proc/self/pagemap"
+#define TEST_ITERATIONS 10000
+
+static void test_simple(int pagemap_fd, int pagesize)
+{
+ int i;
+ char *map;
+
+ map = aligned_alloc(pagesize, pagesize);
+ if (!map)
+ ksft_exit_fail_msg("mmap failed\n");
+
+ clear_softdirty();
+
+ for (i = 0 ; i < TEST_ITERATIONS; i++) {
+ if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
+ ksft_print_msg("dirty bit was 1, but should be 0 (i=%d)\n", i);
+ break;
+ }
+
+ clear_softdirty();
+ // Write something to the page to get the dirty bit enabled on the page
+ map[0]++;
+
+ if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
+ ksft_print_msg("dirty bit was 0, but should be 1 (i=%d)\n", i);
+ break;
+ }
+
+ clear_softdirty();
+ }
+ free(map);
+
+ ksft_test_result(i == TEST_ITERATIONS, "Test %s\n", __func__);
+}
+
+static void test_vma_reuse(int pagemap_fd, int pagesize)
+{
+ char *map, *map2;
+
+ map = mmap(NULL, pagesize, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANON), -1, 0);
+ if (map == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed");
+
+ // The kernel always marks new regions as soft dirty
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map) == 1,
+ "Test %s dirty bit of allocated page\n", __func__);
+
+ clear_softdirty();
+ munmap(map, pagesize);
+
+ map2 = mmap(NULL, pagesize, (PROT_READ | PROT_WRITE), (MAP_PRIVATE | MAP_ANON), -1, 0);
+ if (map2 == MAP_FAILED)
+ ksft_exit_fail_msg("mmap failed");
+
+ // Dirty bit is set for new regions even if they are reused
+ if (map == map2)
+ ksft_test_result(pagemap_is_softdirty(pagemap_fd, map2) == 1,
+ "Test %s dirty bit of reused address page\n", __func__);
+ else
+ ksft_test_result_skip("Test %s dirty bit of reused address page\n", __func__);
+
+ munmap(map2, pagesize);
+}
+
+static void test_hugepage(int pagemap_fd, int pagesize)
+{
+ char *map;
+ int i, ret;
+ size_t hpage_len = read_pmd_pagesize();
+
+ map = memalign(hpage_len, hpage_len);
+ if (!map)
+ ksft_exit_fail_msg("memalign failed\n");
+
+ ret = madvise(map, hpage_len, MADV_HUGEPAGE);
+ if (ret)
+ ksft_exit_fail_msg("madvise failed %d\n", ret);
+
+ for (i = 0; i < hpage_len; i++)
+ map[i] = (char)i;
+
+ if (check_huge(map)) {
+ ksft_test_result_pass("Test %s huge page allocation\n", __func__);
+
+ clear_softdirty();
+ for (i = 0 ; i < TEST_ITERATIONS ; i++) {
+ if (pagemap_is_softdirty(pagemap_fd, map) == 1) {
+ ksft_print_msg("dirty bit was 1, but should be 0 (i=%d)\n", i);
+ break;
+ }
+
+ clear_softdirty();
+ // Write something to the page to get the dirty bit enabled on the page
+ map[0]++;
+
+ if (pagemap_is_softdirty(pagemap_fd, map) == 0) {
+ ksft_print_msg("dirty bit was 0, but should be 1 (i=%d)\n", i);
+ break;
+ }
+ clear_softdirty();
+ }
+
+ ksft_test_result(i == TEST_ITERATIONS, "Test %s huge page dirty bit\n", __func__);
+ } else {
+		// Hugepage allocation failed; skip these tests
+ ksft_test_result_skip("Test %s huge page allocation\n", __func__);
+ ksft_test_result_skip("Test %s huge page dirty bit\n", __func__);
+ }
+ free(map);
+}
+
+int main(int argc, char **argv)
+{
+ int pagemap_fd;
+ int pagesize;
+
+ ksft_print_header();
+ ksft_set_plan(5);
+
+ pagemap_fd = open(PAGEMAP_FILE_PATH, O_RDONLY);
+ if (pagemap_fd < 0)
+ ksft_exit_fail_msg("Failed to open %s\n", PAGEMAP_FILE_PATH);
+
+ pagesize = getpagesize();
+
+ test_simple(pagemap_fd, pagesize);
+ test_vma_reuse(pagemap_fd, pagesize);
+ test_hugepage(pagemap_fd, pagesize);
+
+ close(pagemap_fd);
+
+ return ksft_exit_pass();
+}
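
The pagemap plumbing the test above relies on (shared via vm_util.c below): /proc/<pid>/pagemap holds one 64-bit entry per virtual page, and bit 55 is the soft-dirty flag. A condensed sketch of the lookup:

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

/* Returns 1 if the page containing addr is soft-dirty, 0 if not, -1 on error. */
static int page_is_softdirty(int pagemap_fd, void *addr)
{
	uint64_t entry;
	off_t off = ((uintptr_t)addr / getpagesize()) * sizeof(entry);

	if (pread(pagemap_fd, &entry, sizeof(entry), off) != sizeof(entry))
		return -1;
	return (entry >> 55) & 1;	/* bit 55: soft-dirty */
}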
diff --git a/tools/testing/selftests/vm/split_huge_page_test.c b/tools/testing/selftests/vm/split_huge_page_test.c
index 52497b7b9f1d..6aa2b8253aed 100644
--- a/tools/testing/selftests/vm/split_huge_page_test.c
+++ b/tools/testing/selftests/vm/split_huge_page_test.c
@@ -16,14 +16,13 @@
#include <sys/mount.h>
#include <malloc.h>
#include <stdbool.h>
+#include "vm_util.h"
uint64_t pagesize;
unsigned int pageshift;
uint64_t pmd_pagesize;
-#define PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
-#define SMAP_PATH "/proc/self/smaps"
#define INPUT_MAX 80
#define PID_FMT "%d,0x%lx,0x%lx"
@@ -51,30 +50,6 @@ int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
return 0;
}
-
-static uint64_t read_pmd_pagesize(void)
-{
- int fd;
- char buf[20];
- ssize_t num_read;
-
- fd = open(PMD_SIZE_PATH, O_RDONLY);
- if (fd == -1) {
- perror("Open hpage_pmd_size failed");
- exit(EXIT_FAILURE);
- }
- num_read = read(fd, buf, 19);
- if (num_read < 1) {
- close(fd);
- perror("Read hpage_pmd_size failed");
- exit(EXIT_FAILURE);
- }
- buf[num_read] = '\0';
- close(fd);
-
- return strtoul(buf, NULL, 10);
-}
-
static int write_file(const char *path, const char *buf, size_t buflen)
{
int fd;
@@ -113,58 +88,6 @@ static void write_debugfs(const char *fmt, ...)
}
}
-#define MAX_LINE_LENGTH 500
-
-static bool check_for_pattern(FILE *fp, const char *pattern, char *buf)
-{
- while (fgets(buf, MAX_LINE_LENGTH, fp) != NULL) {
- if (!strncmp(buf, pattern, strlen(pattern)))
- return true;
- }
- return false;
-}
-
-static uint64_t check_huge(void *addr)
-{
- uint64_t thp = 0;
- int ret;
- FILE *fp;
- char buffer[MAX_LINE_LENGTH];
- char addr_pattern[MAX_LINE_LENGTH];
-
- ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
- (unsigned long) addr);
- if (ret >= MAX_LINE_LENGTH) {
- printf("%s: Pattern is too long\n", __func__);
- exit(EXIT_FAILURE);
- }
-
-
- fp = fopen(SMAP_PATH, "r");
- if (!fp) {
- printf("%s: Failed to open file %s\n", __func__, SMAP_PATH);
- exit(EXIT_FAILURE);
- }
- if (!check_for_pattern(fp, addr_pattern, buffer))
- goto err_out;
-
- /*
- * Fetch the AnonHugePages: in the same block and check the number of
- * hugepages.
- */
- if (!check_for_pattern(fp, "AnonHugePages:", buffer))
- goto err_out;
-
- if (sscanf(buffer, "AnonHugePages:%10ld kB", &thp) != 1) {
- printf("Reading smap error\n");
- exit(EXIT_FAILURE);
- }
-
-err_out:
- fclose(fp);
- return thp;
-}
-
void split_pmd_thp(void)
{
char *one_page;
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 92a4516f8f0d..4bc24581760d 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -82,7 +82,7 @@ static int test_type;
static volatile bool test_uffdio_copy_eexist = true;
static volatile bool test_uffdio_zeropage_eexist = true;
/* Whether to test uffd write-protection */
-static bool test_uffdio_wp = false;
+static bool test_uffdio_wp = true;
/* Whether to test uffd minor faults */
static bool test_uffdio_minor = false;
@@ -860,7 +860,7 @@ static int stress(struct uffd_stats *uffd_stats)
/*
* Be strict and immediately zap area_src, the whole area has
* been transferred already by the background treads. The
- * area_src could then be faulted in in a racy way by still
+ * area_src could then be faulted in a racy way by still
* running uffdio_threads reading zeropages after we zapped
* area_src (but they're guaranteed to get -EEXIST from
* UFFDIO_COPY without writing zero pages into area_dst
@@ -1422,7 +1422,6 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
static int userfaultfd_stress(void)
{
void *area;
- char *tmp_area;
unsigned long nr;
struct uffdio_register uffdio_register;
struct uffd_stats uffd_stats[nr_cpus];
@@ -1533,13 +1532,9 @@ static int userfaultfd_stress(void)
count_verify[nr], nr);
/* prepare next bounce */
- tmp_area = area_src;
- area_src = area_dst;
- area_dst = tmp_area;
+ swap(area_src, area_dst);
- tmp_area = area_src_alias;
- area_src_alias = area_dst_alias;
- area_dst_alias = tmp_area;
+ swap(area_src_alias, area_dst_alias);
uffd_stats_report(uffd_stats, nr_cpus);
}
@@ -1594,8 +1589,6 @@ static void set_test_type(const char *type)
if (!strcmp(type, "anon")) {
test_type = TEST_ANON;
uffd_test_ops = &anon_uffd_test_ops;
- /* Only enable write-protect test for anonymous test */
- test_uffdio_wp = true;
} else if (!strcmp(type, "hugetlb")) {
test_type = TEST_HUGETLB;
uffd_test_ops = &hugetlb_uffd_test_ops;
diff --git a/tools/testing/selftests/vm/vm_util.c b/tools/testing/selftests/vm/vm_util.c
new file mode 100644
index 000000000000..b58ab11a7a30
--- /dev/null
+++ b/tools/testing/selftests/vm/vm_util.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include <fcntl.h>
+#include "../kselftest.h"
+#include "vm_util.h"
+
+#define PMD_SIZE_FILE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
+#define SMAP_FILE_PATH "/proc/self/smaps"
+#define MAX_LINE_LENGTH 500
+
+uint64_t pagemap_get_entry(int fd, char *start)
+{
+ const unsigned long pfn = (unsigned long)start / getpagesize();
+ uint64_t entry;
+ int ret;
+
+ ret = pread(fd, &entry, sizeof(entry), pfn * sizeof(entry));
+ if (ret != sizeof(entry))
+ ksft_exit_fail_msg("reading pagemap failed\n");
+ return entry;
+}
+
+bool pagemap_is_softdirty(int fd, char *start)
+{
+ uint64_t entry = pagemap_get_entry(fd, start);
+
+	// Check if the soft-dirty bit (bit 55) is set
+ return entry & 0x0080000000000000ull;
+}
+
+void clear_softdirty(void)
+{
+ int ret;
+ const char *ctrl = "4";
+ int fd = open("/proc/self/clear_refs", O_WRONLY);
+
+ if (fd < 0)
+ ksft_exit_fail_msg("opening clear_refs failed\n");
+ ret = write(fd, ctrl, strlen(ctrl));
+ close(fd);
+ if (ret != strlen(ctrl))
+ ksft_exit_fail_msg("writing clear_refs failed\n");
+}
+
+static bool check_for_pattern(FILE *fp, const char *pattern, char *buf)
+{
+ while (fgets(buf, MAX_LINE_LENGTH, fp) != NULL) {
+ if (!strncmp(buf, pattern, strlen(pattern)))
+ return true;
+ }
+ return false;
+}
+
+uint64_t read_pmd_pagesize(void)
+{
+ int fd;
+ char buf[20];
+ ssize_t num_read;
+
+ fd = open(PMD_SIZE_FILE_PATH, O_RDONLY);
+ if (fd == -1)
+ ksft_exit_fail_msg("Open hpage_pmd_size failed\n");
+
+ num_read = read(fd, buf, 19);
+ if (num_read < 1) {
+ close(fd);
+ ksft_exit_fail_msg("Read hpage_pmd_size failed\n");
+ }
+ buf[num_read] = '\0';
+ close(fd);
+
+ return strtoul(buf, NULL, 10);
+}
+
+uint64_t check_huge(void *addr)
+{
+ uint64_t thp = 0;
+ int ret;
+ FILE *fp;
+ char buffer[MAX_LINE_LENGTH];
+ char addr_pattern[MAX_LINE_LENGTH];
+
+ ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
+ (unsigned long) addr);
+ if (ret >= MAX_LINE_LENGTH)
+ ksft_exit_fail_msg("%s: Pattern is too long\n", __func__);
+
+ fp = fopen(SMAP_FILE_PATH, "r");
+ if (!fp)
+ ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__, SMAP_FILE_PATH);
+
+ if (!check_for_pattern(fp, addr_pattern, buffer))
+ goto err_out;
+
+ /*
+ * Fetch the AnonHugePages: in the same block and check the number of
+ * hugepages.
+ */
+ if (!check_for_pattern(fp, "AnonHugePages:", buffer))
+ goto err_out;
+
+ if (sscanf(buffer, "AnonHugePages:%10ld kB", &thp) != 1)
+ ksft_exit_fail_msg("Reading smap error\n");
+
+err_out:
+ fclose(fp);
+ return thp;
+}
diff --git a/tools/testing/selftests/vm/vm_util.h b/tools/testing/selftests/vm/vm_util.h
new file mode 100644
index 000000000000..2e512bd57ae1
--- /dev/null
+++ b/tools/testing/selftests/vm/vm_util.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <stdint.h>
+#include <stdbool.h>
+
+uint64_t pagemap_get_entry(int fd, char *start);
+bool pagemap_is_softdirty(int fd, char *start);
+void clear_softdirty(void);
+uint64_t read_pmd_pagesize(void);
+uint64_t check_huge(void *addr);
diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
index bca07b93eeb0..fda76282d34b 100644
--- a/tools/testing/selftests/wireguard/qemu/Makefile
+++ b/tools/testing/selftests/wireguard/qemu/Makefile
@@ -19,8 +19,6 @@ endif
MIRROR := https://download.wireguard.com/qemu-test/distfiles/
KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug)
-rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
-WIREGUARD_SOURCES := $(call rwildcard,$(KERNEL_PATH)/drivers/net/wireguard/,*)
default: qemu
@@ -64,8 +62,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
else
-QEMU_MACHINE := -cpu cortex-a53 -machine virt
-CFLAGS += -march=armv8-a -mtune=cortex-a53
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv8-a
endif
else ifeq ($(ARCH),aarch64_be)
CHOST := aarch64_be-linux-musl
@@ -76,8 +74,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
else
-QEMU_MACHINE := -cpu cortex-a53 -machine virt
-CFLAGS += -march=armv8-a -mtune=cortex-a53
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv8-a
endif
else ifeq ($(ARCH),arm)
CHOST := arm-linux-musleabi
@@ -88,8 +86,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
else
-QEMU_MACHINE := -cpu cortex-a15 -machine virt
-CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv7-a -mabi=aapcs-linux
endif
else ifeq ($(ARCH),armeb)
CHOST := armeb-linux-musleabi
@@ -100,8 +98,8 @@ QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm
else
-QEMU_MACHINE := -cpu cortex-a15 -machine virt
-CFLAGS += -march=armv7-a -mabi=aapcs-linux # We don't pass -mtune=cortex-a15 due to a compiler bug on big endian.
+QEMU_MACHINE := -cpu max -machine virt
+CFLAGS += -march=armv7-a -mabi=aapcs-linux
LDFLAGS += -Wl,--be8
endif
else ifeq ($(ARCH),x86_64)
@@ -109,22 +107,22 @@ CHOST := x86_64-linux-musl
QEMU_ARCH := x86_64
KERNEL_ARCH := x86_64
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
-QEMU_MACHINE := -cpu host -machine q35,accel=kvm
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
else
-QEMU_MACHINE := -cpu Skylake-Server -machine q35
-CFLAGS += -march=skylake-avx512
+QEMU_MACHINE := -cpu max -machine microvm -no-acpi
endif
else ifeq ($(ARCH),i686)
CHOST := i686-linux-musl
QEMU_ARCH := i386
KERNEL_ARCH := x86
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
-QEMU_MACHINE := -cpu host -machine q35,accel=kvm
+QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
else
-QEMU_MACHINE := -cpu coreduo -machine q35
-CFLAGS += -march=prescott
+QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
endif
else ifeq ($(ARCH),mips64)
CHOST := mips64-linux-musl
@@ -182,7 +180,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
ifeq ($(HOST_ARCH),$(ARCH))
QEMU_MACHINE := -cpu host,accel=kvm -machine pseries
else
-QEMU_MACHINE := -machine pseries
+QEMU_MACHINE := -machine pseries -device spapr-rng,rng=rng -object rng-random,id=rng
endif
else ifeq ($(ARCH),powerpc64le)
CHOST := powerpc64le-linux-musl
@@ -192,7 +190,7 @@ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
ifeq ($(HOST_ARCH),$(ARCH))
QEMU_MACHINE := -cpu host,accel=kvm -machine pseries
else
-QEMU_MACHINE := -machine pseries
+QEMU_MACHINE := -machine pseries -device spapr-rng,rng=rng -object rng-random,id=rng
endif
else ifeq ($(ARCH),powerpc)
CHOST := powerpc-linux-musl
@@ -210,10 +208,11 @@ QEMU_ARCH := m68k
KERNEL_ARCH := m68k
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/m68k.config)
+QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
-QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -append $(KERNEL_CMDLINE)
+QEMU_MACHINE := -cpu host,accel=kvm -machine virt -append $(KERNEL_CMDLINE)
else
-QEMU_MACHINE := -machine q800 -smp 1 -append $(KERNEL_CMDLINE)
+QEMU_MACHINE := -machine virt -smp 1 -append $(KERNEL_CMDLINE)
endif
else ifeq ($(ARCH),riscv64)
CHOST := riscv64-linux-musl
@@ -247,10 +246,15 @@ QEMU_VPORT_RESULT := virtio-serial-ccw
ifeq ($(HOST_ARCH),$(ARCH))
QEMU_MACHINE := -cpu host,accel=kvm -machine s390-ccw-virtio -append $(KERNEL_CMDLINE)
else
-QEMU_MACHINE := -machine s390-ccw-virtio -append $(KERNEL_CMDLINE)
+QEMU_MACHINE := -cpu max -machine s390-ccw-virtio -append $(KERNEL_CMDLINE)
endif
+else ifeq ($(ARCH),um)
+CHOST := $(HOST_ARCH)-linux-musl
+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
+KERNEL_ARCH := um
+KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/um.config)
else
-$(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64, powerpc64le, powerpc, m68k, riscv64, riscv32, s390x)
+$(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64, powerpc64le, powerpc, m68k, riscv64, riscv32, s390x, um)
endif
TOOLCHAIN_FILENAME := $(CHOST)-cross.tgz
@@ -263,7 +267,9 @@ $(eval $(call file_download,$(TOOLCHAIN_FILENAME),$(TOOLCHAIN_DIR),,$(DISTFILES_
STRIP := $(CHOST)-strip
CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST)
$(info Building for $(CHOST) using $(CBUILD))
+ifneq ($(ARCH),um)
export CROSS_COMPILE := $(CHOST)-
+endif
export PATH := $(TOOLCHAIN_PATH)/bin:$(PATH)
export CC := $(CHOST)-gcc
CCACHE_PATH := $(shell which ccache 2>/dev/null)
@@ -280,6 +286,7 @@ comma := ,
build: $(KERNEL_BZIMAGE)
qemu: $(KERNEL_BZIMAGE)
rm -f $(BUILD_PATH)/result
+ifneq ($(ARCH),um)
timeout --foreground 20m qemu-system-$(QEMU_ARCH) \
-nodefaults \
-nographic \
@@ -292,6 +299,13 @@ qemu: $(KERNEL_BZIMAGE)
-no-reboot \
-monitor none \
-kernel $<
+else
+ timeout --foreground 20m $< \
+ $(KERNEL_CMDLINE) \
+ mem=$$(grep -q CONFIG_DEBUG_KMEMLEAK=y $(KERNEL_BUILD_PATH)/.config && echo 1G || echo 256M) \
+ noreboot \
+ con1=fd:51 51>$(BUILD_PATH)/result </dev/null 2>&1 | cat
+endif
grep -Fq success $(BUILD_PATH)/result
$(BUILD_PATH)/init-cpio-spec.txt: $(TOOLCHAIN_PATH)/.installed $(BUILD_PATH)/init
@@ -324,8 +338,9 @@ $(KERNEL_BUILD_PATH)/.config: $(TOOLCHAIN_PATH)/.installed kernel.config arch/$(
cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config
$(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,)
-$(KERNEL_BZIMAGE): $(TOOLCHAIN_PATH)/.installed $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES)
+$(KERNEL_BZIMAGE): $(TOOLCHAIN_PATH)/.installed $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init
$(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE)
+.PHONY: $(KERNEL_BZIMAGE)
$(TOOLCHAIN_PATH)/$(CHOST)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config $(TOOLCHAIN_PATH)/.installed
rm -rf $(TOOLCHAIN_PATH)/$(CHOST)/include/linux
diff --git a/tools/testing/selftests/wireguard/qemu/arch/arm.config b/tools/testing/selftests/wireguard/qemu/arch/arm.config
index fc7959bef9c2..0579c66be83e 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/arm.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/arm.config
@@ -7,6 +7,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/armeb.config b/tools/testing/selftests/wireguard/qemu/arch/armeb.config
index f3066be81c19..2a3307bbe534 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/armeb.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/armeb.config
@@ -7,6 +7,7 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_VIRTIO_MENU=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1"
CONFIG_CPU_BIG_ENDIAN=y
diff --git a/tools/testing/selftests/wireguard/qemu/arch/i686.config b/tools/testing/selftests/wireguard/qemu/arch/i686.config
index 6d90892a85a2..35b06502606f 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/i686.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/i686.config
@@ -1,6 +1,10 @@
-CONFIG_ACPI=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_COMPAT_32BIT_TIME=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=vport0p1 panic_on_warn=1 reboot=t"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/m68k.config b/tools/testing/selftests/wireguard/qemu/arch/m68k.config
index 82c925e49beb..39c48cba56b7 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/m68k.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config
@@ -1,9 +1,7 @@
CONFIG_MMU=y
+CONFIG_VIRT=y
CONFIG_M68KCLASSIC=y
-CONFIG_M68040=y
-CONFIG_MAC=y
-CONFIG_SERIAL_PMACZILOG=y
-CONFIG_SERIAL_PMACZILOG_TTYS=y
-CONFIG_SERIAL_PMACZILOG_CONSOLE=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_CMDLINE="console=ttyGF0 wg.success=vport0p1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/mips.config b/tools/testing/selftests/wireguard/qemu/arch/mips.config
index d7ec63c17b30..2a84402353ab 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/mips.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/mips.config
@@ -6,6 +6,7 @@ CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/mipsel.config b/tools/testing/selftests/wireguard/qemu/arch/mipsel.config
index 18a498293737..56146a101e7e 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/mipsel.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/mipsel.config
@@ -7,6 +7,7 @@ CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
CONFIG_FRAME_WARN=1024
diff --git a/tools/testing/selftests/wireguard/qemu/arch/powerpc.config b/tools/testing/selftests/wireguard/qemu/arch/powerpc.config
index 5e04882e8e35..174a9ffe2a36 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/powerpc.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc.config
@@ -4,6 +4,7 @@ CONFIG_PPC_85xx=y
CONFIG_PHYS_64BIT=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_COMPAT_32BIT_TIME=y
CONFIG_MATH_EMULATION=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
diff --git a/tools/testing/selftests/wireguard/qemu/arch/um.config b/tools/testing/selftests/wireguard/qemu/arch/um.config
new file mode 100644
index 000000000000..c8b229e0810e
--- /dev/null
+++ b/tools/testing/selftests/wireguard/qemu/arch/um.config
@@ -0,0 +1,3 @@
+CONFIG_64BIT=y
+CONFIG_CMDLINE="wg.success=tty1 panic_on_warn=1"
+CONFIG_FRAME_WARN=1280
diff --git a/tools/testing/selftests/wireguard/qemu/arch/x86_64.config b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
index efa00693e08b..cf2d1376d121 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
@@ -1,6 +1,9 @@
-CONFIG_ACPI=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1"
+CONFIG_CMDLINE="console=ttyS0 wg.success=vport0p1 panic_on_warn=1 reboot=t"
CONFIG_FRAME_WARN=1280
diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config
index 2b321b8a96cf..9d172210e2c6 100644
--- a/tools/testing/selftests/wireguard/qemu/debug.config
+++ b/tools/testing/selftests/wireguard/qemu/debug.config
@@ -18,15 +18,12 @@ CONFIG_DEBUG_VM=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_HAVE_ARCH_KMEMCHECK=y
CONFIG_HAVE_ARCH_KASAN=y
CONFIG_KASAN=y
CONFIG_KASAN_INLINE=y
CONFIG_UBSAN=y
CONFIG_UBSAN_SANITIZE_ALL=y
-CONFIG_UBSAN_NULL=y
CONFIG_DEBUG_KMEMLEAK=y
-CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=8192
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_WQ_WATCHDOG=y
@@ -35,7 +32,6 @@ CONFIG_SCHED_INFO=y
CONFIG_SCHEDSTATS=y
CONFIG_SCHED_STACK_END_CHECK=y
CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_PREEMPT=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_SPINLOCK=y
@@ -49,7 +45,6 @@ CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_PLIST=y
CONFIG_PROVE_RCU=y
-CONFIG_SPARSE_RCU_POINTER=y
CONFIG_RCU_CPU_STALL_TIMEOUT=21
CONFIG_RCU_TRACE=y
CONFIG_RCU_EQS_DEBUG=y
diff --git a/tools/testing/selftests/wireguard/qemu/init.c b/tools/testing/selftests/wireguard/qemu/init.c
index 2a0f48fac925..3e49924dd77e 100644
--- a/tools/testing/selftests/wireguard/qemu/init.c
+++ b/tools/testing/selftests/wireguard/qemu/init.c
@@ -11,6 +11,7 @@
#include <stdlib.h>
#include <stdbool.h>
#include <fcntl.h>
+#include <time.h>
#include <sys/wait.h>
#include <sys/mount.h>
#include <sys/stat.h>
@@ -21,6 +22,7 @@
#include <sys/utsname.h>
#include <sys/sendfile.h>
#include <sys/sysmacros.h>
+#include <sys/random.h>
#include <linux/random.h>
#include <linux/version.h>
@@ -58,6 +60,8 @@ static void seed_rng(void)
{
int bits = 256, fd;
+ if (!getrandom(NULL, 0, GRND_NONBLOCK))
+ return;
pretty_message("[+] Fake seeding RNG...");
fd = open("/dev/random", O_WRONLY);
if (fd < 0)
@@ -67,6 +71,15 @@ static void seed_rng(void)
close(fd);
}
+static void set_time(void)
+{
+ if (time(NULL))
+ return;
+ pretty_message("[+] Setting fake time...");
+ if (stime(&(time_t){1433512680}) < 0)
+ panic("settimeofday()");
+}
+
static void mount_filesystems(void)
{
pretty_message("[+] Mounting filesystems...");
@@ -256,6 +269,7 @@ int main(int argc, char *argv[])
print_banner();
mount_filesystems();
seed_rng();
+ set_time();
kmod_selftests();
enable_logging();
clear_leaks();
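The new early-exit in seed_rng() relies on a documented getrandom(2) behavior: a zero-length read with GRND_NONBLOCK returns 0 once the kernel RNG is initialized, and fails with EAGAIN otherwise, so no bytes need to be transferred just to probe the pool state. A standalone sketch of the same probe, assuming a libc that exposes <sys/random.h>:

#include <errno.h>
#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	/* Zero-length read: nothing is copied, only pool readiness is checked. */
	if (getrandom(NULL, 0, GRND_NONBLOCK) == 0)
		printf("RNG initialized, no fake seeding needed\n");
	else if (errno == EAGAIN)
		printf("RNG not yet initialized\n");
	else
		perror("getrandom");
	return 0;
}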
diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
index a9b5a520a1d2..ce2a04717300 100644
--- a/tools/testing/selftests/wireguard/qemu/kernel.config
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config
@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
CONFIG_NETFILTER_XT_NAT=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MARK=y
-CONFIG_NF_NAT_IPV4=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_MANGLE=y
@@ -31,6 +30,7 @@ CONFIG_TTY=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_SCRIPT=y
CONFIG_VDSO=y
+CONFIG_STRICT_KERNEL_RWX=y
CONFIG_VIRTUALIZATION=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
@@ -57,7 +57,6 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_HZ_PERIODIC=n
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_ARCH_RANDOM=y
CONFIG_FILE_LOCKING=y
CONFIG_POSIX_TIMERS=y
CONFIG_DEVTMPFS=y
@@ -65,6 +64,8 @@ CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
+CONFIG_RANDOM_TRUST_CPU=y
+CONFIG_RANDOM_TRUST_BOOTLOADER=y
CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
CONFIG_LOG_BUF_SHIFT=18
CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c
index 2189f0322d8b..625e42901237 100644
--- a/tools/testing/selftests/x86/amx.c
+++ b/tools/testing/selftests/x86/amx.c
@@ -17,6 +17,8 @@
#include <sys/syscall.h>
#include <sys/wait.h>
+#include "../kselftest.h" /* For __cpuid_count() */
+
#ifndef __x86_64__
# error This test is 64-bit only
#endif
@@ -45,13 +47,6 @@ static inline uint64_t xgetbv(uint32_t index)
return eax + ((uint64_t)edx << 32);
}
-static inline void cpuid(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
-{
- asm volatile("cpuid;"
- : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
- : "0" (*eax), "2" (*ecx));
-}
-
static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
{
uint32_t rfbm_lo = rfbm;
@@ -115,9 +110,7 @@ static inline void check_cpuid_xsave(void)
* support for the XSAVE feature set, including
* XGETBV.
*/
- eax = 1;
- ecx = 0;
- cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid_count(1, 0, eax, ebx, ecx, edx);
if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK))
fatal_error("cpuid: no CPU xsave support");
if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK))
@@ -140,9 +133,8 @@ static void check_cpuid_xtiledata(void)
{
uint32_t eax, ebx, ecx, edx;
- eax = CPUID_LEAF_XSTATE;
- ecx = CPUID_SUBLEAF_XSTATE_USER;
- cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid_count(CPUID_LEAF_XSTATE, CPUID_SUBLEAF_XSTATE_USER,
+ eax, ebx, ecx, edx);
/*
* EBX enumerates the size (in bytes) required by the XSAVE
@@ -153,10 +145,8 @@ static void check_cpuid_xtiledata(void)
*/
xbuf_size = ebx;
- eax = CPUID_LEAF_XSTATE;
- ecx = XFEATURE_XTILEDATA;
-
- cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid_count(CPUID_LEAF_XSTATE, XFEATURE_XTILEDATA,
+ eax, ebx, ecx, edx);
/*
* eax: XTILEDATA state component size
* ebx: XTILEDATA state component offset in user buffer
diff --git a/tools/testing/selftests/x86/corrupt_xstate_header.c b/tools/testing/selftests/x86/corrupt_xstate_header.c
index ab8599c10ce5..cf9ce8fbb656 100644
--- a/tools/testing/selftests/x86/corrupt_xstate_header.c
+++ b/tools/testing/selftests/x86/corrupt_xstate_header.c
@@ -17,25 +17,13 @@
#include <stdint.h>
#include <sys/wait.h>
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
-{
- asm volatile(
- "cpuid;"
- : "=a" (*eax),
- "=b" (*ebx),
- "=c" (*ecx),
- "=d" (*edx)
- : "0" (*eax), "2" (*ecx));
-}
+#include "../kselftest.h" /* For __cpuid_count() */
static inline int xsave_enabled(void)
{
unsigned int eax, ebx, ecx, edx;
- eax = 0x1;
- ecx = 0x0;
- __cpuid(&eax, &ebx, &ecx, &edx);
+ __cpuid_count(0x1, 0x0, eax, ebx, ecx, edx);
/* Is CR4.OSXSAVE enabled ? */
return ecx & (1U << 27);
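Both tests now use the __cpuid_count() helper from tools/testing/selftests/kselftest.h instead of carrying private inline-asm wrappers. A minimal sketch of the same pattern, probing the OSXSAVE bit exactly as xsave_enabled() does above (the relative include path is an assumption depending on where the file lives):

#include <stdio.h>
#include "../kselftest.h"	/* provides the __cpuid_count() macro */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x1, subleaf 0x0: ECX bit 27 reflects CR4.OSXSAVE. */
	__cpuid_count(0x1, 0x0, eax, ebx, ecx, edx);

	printf("OSXSAVE is %s\n", (ecx & (1U << 27)) ? "enabled" : "disabled");
	return 0;
}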
diff --git a/tools/thermal/lib/Build b/tools/thermal/lib/Build
new file mode 100644
index 000000000000..06f22760a272
--- /dev/null
+++ b/tools/thermal/lib/Build
@@ -0,0 +1,3 @@
+libthermal_tools-y += mainloop.o
+libthermal_tools-y += log.o
+libthermal_tools-y += uptimeofday.o
diff --git a/tools/thermal/lib/Makefile b/tools/thermal/lib/Makefile
new file mode 100644
index 000000000000..82db451935c5
--- /dev/null
+++ b/tools/thermal/lib/Makefile
@@ -0,0 +1,158 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Most of this file is copied from tools/lib/perf/Makefile
+
+LIBTHERMAL_TOOLS_VERSION = 0
+LIBTHERMAL_TOOLS_PATCHLEVEL = 0
+LIBTHERMAL_TOOLS_EXTRAVERSION = 1
+
+MAKEFLAGS += --no-print-directory
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+# $(info Determined 'srctree' to be $(srctree))
+endif
+
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+include $(srctree)/tools/scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.arch
+
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?=
+libdir = $(prefix)/$(libdir_relative)
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
+
+ifeq ("$(origin V)", "command line")
+ VERBOSE = $(V)
+endif
+ifndef VERBOSE
+ VERBOSE = 0
+endif
+
+ifeq ($(VERBOSE),1)
+ Q =
+else
+ Q = @
+endif
+
+# Set compile option CFLAGS
+ifdef EXTRA_CFLAGS
+ CFLAGS := $(EXTRA_CFLAGS)
+else
+ CFLAGS := -g -Wall
+endif
+
+INCLUDES = \
+-I/usr/include/libnl3 \
+-I$(srctree)/tools/lib/thermal/include \
+-I$(srctree)/tools/lib/ \
+-I$(srctree)/tools/include \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/ \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/uapi \
+-I$(srctree)/tools/include/uapi
+
+# Append required CFLAGS
+override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += -Werror -Wall
+override CFLAGS += -fPIC
+override CFLAGS += $(INCLUDES)
+override CFLAGS += -Wl,-L.
+override CFLAGS += -Wl,-lthermal
+
+all:
+
+export srctree OUTPUT CC LD CFLAGS V
+export DESTDIR DESTDIR_SQ
+
+include $(srctree)/tools/build/Makefile.include
+
+PATCHLEVEL = $(LIBTHERMAL_TOOLS_PATCHLEVEL)
+EXTRAVERSION = $(LIBTHERMAL_TOOLS_EXTRAVERSION)
+VERSION = $(LIBTHERMAL_TOOLS_VERSION).$(LIBTHERMAL_TOOLS_PATCHLEVEL).$(LIBTHERMAL_TOOLS_EXTRAVERSION)
+
+LIBTHERMAL_TOOLS_SO := $(OUTPUT)libthermal_tools.so.$(VERSION)
+LIBTHERMAL_TOOLS_A := $(OUTPUT)libthermal_tools.a
+LIBTHERMAL_TOOLS_IN := $(OUTPUT)libthermal_tools-in.o
+LIBTHERMAL_TOOLS_PC := $(OUTPUT)libthermal_tools.pc
+
+LIBTHERMAL_TOOLS_ALL := $(LIBTHERMAL_TOOLS_A) $(OUTPUT)libthermal_tools.so*
+
+$(LIBTHERMAL_TOOLS_IN): FORCE
+ $(Q)$(MAKE) $(build)=libthermal_tools
+
+$(LIBTHERMAL_TOOLS_A): $(LIBTHERMAL_TOOLS_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBTHERMAL_TOOLS_IN)
+
+$(LIBTHERMAL_TOOLS_SO): $(LIBTHERMAL_TOOLS_IN)
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libthermal_tools.so $^ -o $@
+ @ln -sf $(@F) $(OUTPUT)libthermal_tools.so
+ @ln -sf $(@F) $(OUTPUT)libthermal_tools.so.$(LIBTHERMAL_TOOLS_VERSION)
+
+
+libs: $(LIBTHERMAL_TOOLS_A) $(LIBTHERMAL_TOOLS_SO) $(LIBTHERMAL_TOOLS_PC)
+
+all: fixdep
+ $(Q)$(MAKE) libs
+
+clean:
+ $(call QUIET_CLEAN, libthermal_tools) $(RM) $(LIBTHERMAL_TOOLS_A) \
+ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBTHERMAL_TOOLS_VERSION) .*.d .*.cmd LIBTHERMAL_TOOLS-CFLAGS $(LIBTHERMAL_TOOLS_PC)
+
+$(LIBTHERMAL_TOOLS_PC):
+ $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
+ -e "s|@LIBDIR@|$(libdir_SQ)|" \
+ -e "s|@VERSION@|$(VERSION)|" \
+ < libthermal_tools.pc.template > $@
+
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+endef
+
+install_lib: libs
+ $(call QUIET_INSTALL, $(LIBTHERMAL_TOOLS_ALL)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIBTHERMAL_TOOLS_ALL) $(DESTDIR)$(libdir_SQ)
+
+install_headers:
+ $(call QUIET_INSTALL, headers) \
+ $(call do_install,include/thermal.h,$(prefix)/include/thermal,644); \
+
+install_pkgconfig: $(LIBTHERMAL_TOOLS_PC)
+ $(call QUIET_INSTALL, $(LIBTHERMAL_TOOLS_PC)) \
+ $(call do_install,$(LIBTHERMAL_TOOLS_PC),$(libdir_SQ)/pkgconfig,644)
+
+install_doc:
+ $(Q)$(MAKE) -C Documentation install-man install-html install-examples
+
+#install: install_lib install_headers install_pkgconfig install_doc
+install: install_lib install_headers install_pkgconfig
+
+FORCE:
+
+.PHONY: all install clean FORCE
diff --git a/tools/thermal/lib/libthermal_tools.pc.template b/tools/thermal/lib/libthermal_tools.pc.template
new file mode 100644
index 000000000000..6f3769731b59
--- /dev/null
+++ b/tools/thermal/lib/libthermal_tools.pc.template
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+prefix=@PREFIX@
+libdir=@LIBDIR@
+includedir=${prefix}/include
+
+Name: libthermal
+Description: thermal library
+Requires: libnl-3.0 libnl-genl-3.0
+Version: @VERSION@
+Libs: -L${libdir} -lnl-genl-3 -lnl-3
+Cflags: -I${includedir} -I${includedir}/libnl3
diff --git a/tools/thermal/lib/log.c b/tools/thermal/lib/log.c
new file mode 100644
index 000000000000..597d6e7f7858
--- /dev/null
+++ b/tools/thermal/lib/log.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <syslog.h>
+#include "log.h"
+
+static const char *__ident = "unknown";
+static int __options;
+
+static const char * const loglvl[] = {
+ [LOG_DEBUG] = "DEBUG",
+ [LOG_INFO] = "INFO",
+ [LOG_NOTICE] = "NOTICE",
+ [LOG_WARNING] = "WARN",
+ [LOG_ERR] = "ERROR",
+ [LOG_CRIT] = "CRITICAL",
+ [LOG_ALERT] = "ALERT",
+ [LOG_EMERG] = "EMERG",
+};
+
+int log_str2level(const char *lvl)
+{
+ int i;
+
+ for (i = 0; i < sizeof(loglvl) / sizeof(loglvl[LOG_DEBUG]); i++)
+ if (!strcmp(lvl, loglvl[i]))
+ return i;
+
+ return LOG_DEBUG;
+}
+
+extern void logit(int level, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+
+ if (__options & TO_SYSLOG)
+ vsyslog(level, format, args);
+
+ if (__options & TO_STDERR)
+ vfprintf(stderr, format, args);
+
+ if (__options & TO_STDOUT)
+ vfprintf(stdout, format, args);
+
+ va_end(args);
+}
+
+int log_init(int level, const char *ident, int options)
+{
+ if (!options)
+ return -1;
+
+ if (level > LOG_DEBUG)
+ return -1;
+
+ if (!ident)
+ return -1;
+
+ __ident = ident;
+ __options = options;
+
+ if (options & TO_SYSLOG) {
+ openlog(__ident, options | LOG_NDELAY, LOG_USER);
+ setlogmask(LOG_UPTO(level));
+ }
+
+ return 0;
+}
+
+void log_exit(void)
+{
+ closelog();
+}
diff --git a/tools/thermal/lib/log.h b/tools/thermal/lib/log.h
new file mode 100644
index 000000000000..be8ab5144938
--- /dev/null
+++ b/tools/thermal/lib/log.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __THERMAL_TOOLS_LOG_H
+#define __THERMAL_TOOLS_LOG_H
+
+#include <syslog.h>
+
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+#endif
+
+#define TO_SYSLOG 0x1
+#define TO_STDOUT 0x2
+#define TO_STDERR 0x4
+
+extern void logit(int level, const char *format, ...);
+
+#define DEBUG(fmt, ...) logit(LOG_DEBUG, "%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#define INFO(fmt, ...) logit(LOG_INFO, fmt, ##__VA_ARGS__)
+#define NOTICE(fmt, ...) logit(LOG_NOTICE, fmt, ##__VA_ARGS__)
+#define WARN(fmt, ...) logit(LOG_WARNING, fmt, ##__VA_ARGS__)
+#define ERROR(fmt, ...) logit(LOG_ERR, fmt, ##__VA_ARGS__)
+#define CRITICAL(fmt, ...) logit(LOG_CRIT, fmt, ##__VA_ARGS__)
+#define ALERT(fmt, ...) logit(LOG_ALERT, fmt, ##__VA_ARGS__)
+#define EMERG(fmt, ...) logit(LOG_EMERG, fmt, ##__VA_ARGS__)
+
+int log_init(int level, const char *ident, int options);
+int log_str2level(const char *lvl);
+void log_exit(void);
+
+#endif
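A minimal sketch of a consumer of this logging facility (the ident string and levels are arbitrary):

#include "log.h"

int main(void)
{
	/* Send INFO and above to syslog, and everything to stdout. */
	if (log_init(LOG_INFO, "log-demo", TO_STDOUT | TO_SYSLOG))
		return 1;

	INFO("starting up\n");
	ERROR("read failure on sensor %d\n", 3);
	DEBUG("masked from syslog by LOG_UPTO(LOG_INFO), still on stdout\n");

	log_exit();
	return 0;
}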
diff --git a/tools/thermal/lib/mainloop.c b/tools/thermal/lib/mainloop.c
new file mode 100644
index 000000000000..94cbbcbd1c14
--- /dev/null
+++ b/tools/thermal/lib/mainloop.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/epoll.h>
+#include "mainloop.h"
+#include "log.h"
+
+static int epfd = -1;
+static unsigned short nrhandler;
+static sig_atomic_t exit_mainloop;
+
+struct mainloop_data {
+ mainloop_callback_t cb;
+ void *data;
+ int fd;
+};
+
+static struct mainloop_data **mds;
+
+#define MAX_EVENTS 10
+
+int mainloop(unsigned int timeout)
+{
+ int i, nfds;
+ struct epoll_event events[MAX_EVENTS];
+ struct mainloop_data *md;
+
+ if (epfd < 0)
+ return -1;
+
+ for (;;) {
+
+ nfds = epoll_wait(epfd, events, MAX_EVENTS, timeout);
+
+ if (exit_mainloop || !nfds)
+ return 0;
+
+ if (nfds < 0) {
+ if (errno == EINTR)
+ continue;
+ return -1;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ md = events[i].data.ptr;
+
+ if (md->cb(md->fd, md->data) > 0)
+ return 0;
+ }
+ }
+}
+
+int mainloop_add(int fd, mainloop_callback_t cb, void *data)
+{
+ struct epoll_event ev = {
+ .events = EPOLLIN,
+ };
+
+ struct mainloop_data *md;
+
+ if (fd >= nrhandler) {
+ mds = realloc(mds, sizeof(*mds) * (fd + 1));
+ if (!mds)
+ return -1;
+ nrhandler = fd + 1;
+ }
+
+ md = malloc(sizeof(*md));
+ if (!md)
+ return -1;
+
+ md->data = data;
+ md->cb = cb;
+ md->fd = fd;
+
+ mds[fd] = md;
+ ev.data.ptr = md;
+
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
+ free(md);
+ return -1;
+ }
+
+ return 0;
+}
+
+int mainloop_del(int fd)
+{
+ if (fd >= nrhandler)
+ return -1;
+
+ if (epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL) < 0)
+ return -1;
+
+ free(mds[fd]);
+
+ return 0;
+}
+
+int mainloop_init(void)
+{
+ epfd = epoll_create(2);
+ if (epfd < 0)
+ return -1;
+
+ return 0;
+}
+
+void mainloop_exit(void)
+{
+ exit_mainloop = 1;
+}
+
+void mainloop_fini(void)
+{
+ close(epfd);
+}
diff --git a/tools/thermal/lib/mainloop.h b/tools/thermal/lib/mainloop.h
new file mode 100644
index 000000000000..89b61e89d905
--- /dev/null
+++ b/tools/thermal/lib/mainloop.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __THERMAL_TOOLS_MAINLOOP_H
+#define __THERMAL_TOOLS_MAINLOOP_H
+
+typedef int (*mainloop_callback_t)(int fd, void *data);
+
+extern int mainloop(unsigned int timeout);
+extern int mainloop_add(int fd, mainloop_callback_t cb, void *data);
+extern int mainloop_del(int fd);
+extern void mainloop_exit(void);
+extern int mainloop_init(void);
+extern void mainloop_fini(void);
+
+#endif
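A minimal sketch of the mainloop API, echoing stdin until EOF: a callback returning a value greater than zero terminates mainloop(), and a -1 timeout wraps around to block indefinitely, as the callers in this series do.

#include <stdio.h>
#include <unistd.h>
#include "mainloop.h"

static int echo_cb(int fd, void *data)
{
	char buf[128];
	ssize_t n = read(fd, buf, sizeof(buf));

	(void)data;

	if (n <= 0)
		return 1;	/* stop the loop on EOF or error */

	fwrite(buf, 1, n, stdout);
	return 0;		/* keep looping */
}

int main(void)
{
	if (mainloop_init() || mainloop_add(STDIN_FILENO, echo_cb, NULL))
		return 1;

	mainloop(-1);		/* block until a callback returns > 0 */
	mainloop_fini();
	return 0;
}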
diff --git a/tools/thermal/lib/thermal-tools.h b/tools/thermal/lib/thermal-tools.h
new file mode 100644
index 000000000000..f43939a468a3
--- /dev/null
+++ b/tools/thermal/lib/thermal-tools.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __THERMAL_TOOLS
+#define __THERMAL_TOOLS
+
+#include "log.h"
+#include "mainloop.h"
+#include "uptimeofday.h"
+
+#endif
diff --git a/tools/thermal/lib/uptimeofday.c b/tools/thermal/lib/uptimeofday.c
new file mode 100644
index 000000000000..dacb02956a68
--- /dev/null
+++ b/tools/thermal/lib/uptimeofday.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <stdio.h>
+#include <sys/time.h>
+#include <linux/sysinfo.h>
+#include "thermal-tools.h"
+
+static unsigned long __offset;
+static struct timeval __tv;
+
+int uptimeofday_init(void)
+{
+ struct sysinfo info;
+
+ if (sysinfo(&info))
+ return -1;
+
+ gettimeofday(&__tv, NULL);
+
+ __offset = __tv.tv_sec - info.uptime;
+
+ return 0;
+}
+
+unsigned long getuptimeofday_ms(void)
+{
+ gettimeofday(&__tv, NULL);
+
+ return ((__tv.tv_sec - __offset) * 1000) + (__tv.tv_usec / 1000);
+}
+
+struct timespec msec_to_timespec(int msec)
+{
+ struct timespec tv = {
+ .tv_sec = (msec / 1000),
+ .tv_nsec = (msec % 1000) * 1000000,
+ };
+
+ return tv;
+}
diff --git a/tools/thermal/lib/uptimeofday.h b/tools/thermal/lib/uptimeofday.h
new file mode 100644
index 000000000000..c0da5de41325
--- /dev/null
+++ b/tools/thermal/lib/uptimeofday.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __THERMAL_TOOLS_UPTIMEOFDAY_H
+#define __THERMAL_TOOLS_UPTIMEOFDAY_H
+#include <sys/sysinfo.h>
+#include <sys/time.h>
+
+int uptimeofday_init(void);
+unsigned long getuptimeofday_ms(void);
+struct timespec msec_to_timespec(int msec);
+
+#endif
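A minimal sketch pairing these helpers with a timerfd, the way the thermometer tool below drives its periodic sampling (error handling trimmed):

#include <stdio.h>
#include <unistd.h>
#include <sys/timerfd.h>
#include "uptimeofday.h"

int main(void)
{
	struct itimerspec its = { 0 };
	unsigned long long expirations;
	int fd, i;

	uptimeofday_init();	/* capture the boot-time offset once */

	fd = timerfd_create(CLOCK_MONOTONIC, 0);
	its.it_value = its.it_interval = msec_to_timespec(250);
	timerfd_settime(fd, 0, &its, NULL);

	for (i = 0; i < 4; i++) {
		read(fd, &expirations, sizeof(expirations));	/* blocks ~250 ms */
		printf("uptime: %lu ms\n", getuptimeofday_ms());
	}

	close(fd);
	return 0;
}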
diff --git a/tools/thermal/thermal-engine/Build b/tools/thermal/thermal-engine/Build
new file mode 100644
index 000000000000..20c3c478b88d
--- /dev/null
+++ b/tools/thermal/thermal-engine/Build
@@ -0,0 +1 @@
+thermal-engine-y += thermal-engine.o
diff --git a/tools/thermal/thermal-engine/Makefile b/tools/thermal/thermal-engine/Makefile
new file mode 100644
index 000000000000..6bd05ff89485
--- /dev/null
+++ b/tools/thermal/thermal-engine/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for thermal tools
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+# $(info Determined 'srctree' to be $(srctree))
+endif
+
+CFLAGS = -Wall -Wextra
+CFLAGS += -I$(srctree)/tools/thermal/lib
+CFLAGS += -I$(srctree)/tools/lib/thermal/include
+
+LDFLAGS = -L$(srctree)/tools/thermal/lib
+LDFLAGS += -L$(srctree)/tools/lib/thermal
+LDFLAGS += -lthermal_tools
+LDFLAGS += -lthermal
+LDFLAGS += -lconfig
+LDFLAGS += -lnl-genl-3 -lnl-3
+
+VERSION = 0.0.1
+
+all: thermal-engine
+%: %.c
+ $(CC) $(CFLAGS) -D VERSION=\"$(VERSION)\" -o $@ $^ $(LDFLAGS)
+clean:
+ $(RM) thermal-engine
diff --git a/tools/thermal/thermal-engine/thermal-engine.c b/tools/thermal/thermal-engine/thermal-engine.c
new file mode 100644
index 000000000000..9b1476a2680f
--- /dev/null
+++ b/tools/thermal/thermal-engine/thermal-engine.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Thermal monitoring tool based on the thermal netlink events.
+ *
+ * Copyright (C) 2022 Linaro Ltd.
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@kernel.org>
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <libgen.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include <syslog.h>
+
+#include <sys/epoll.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <thermal.h>
+#include "thermal-tools.h"
+
+struct options {
+ int loglevel;
+ int logopt;
+ int interactive;
+ int daemonize;
+};
+
+struct thermal_data {
+ struct thermal_zone *tz;
+ struct thermal_handler *th;
+};
+
+static int show_trip(struct thermal_trip *tt, __maybe_unused void *arg)
+{
+ INFO("trip id=%d, type=%d, temp=%d, hyst=%d\n",
+ tt->id, tt->type, tt->temp, tt->hyst);
+
+ return 0;
+}
+
+static int show_temp(struct thermal_zone *tz, __maybe_unused void *arg)
+{
+ thermal_cmd_get_temp(arg, tz);
+
+ INFO("temperature: %d\n", tz->temp);
+
+ return 0;
+}
+
+static int show_governor(struct thermal_zone *tz, __maybe_unused void *arg)
+{
+ thermal_cmd_get_governor(arg, tz);
+
+ INFO("governor: '%s'\n", tz->governor);
+
+ return 0;
+}
+
+static int show_tz(struct thermal_zone *tz, __maybe_unused void *arg)
+{
+ INFO("thermal zone '%s', id=%d\n", tz->name, tz->id);
+
+ for_each_thermal_trip(tz->trip, show_trip, NULL);
+
+ show_temp(tz, arg);
+
+ show_governor(tz, arg);
+
+ return 0;
+}
+
+static int tz_create(const char *name, int tz_id, __maybe_unused void *arg)
+{
+ INFO("Thermal zone '%s'/%d created\n", name, tz_id);
+
+ return 0;
+}
+
+static int tz_delete(int tz_id, __maybe_unused void *arg)
+{
+ INFO("Thermal zone %d deleted\n", tz_id);
+
+ return 0;
+}
+
+static int tz_disable(int tz_id, void *arg)
+{
+ struct thermal_data *td = arg;
+ struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id);
+
+ INFO("Thermal zone %d ('%s') disabled\n", tz_id, tz->name);
+
+ return 0;
+}
+
+static int tz_enable(int tz_id, void *arg)
+{
+ struct thermal_data *td = arg;
+ struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id);
+
+ INFO("Thermal zone %d ('%s') enabled\n", tz_id, tz->name);
+
+ return 0;
+}
+
+static int trip_high(int tz_id, int trip_id, int temp, void *arg)
+{
+ struct thermal_data *td = arg;
+ struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id);
+
+ INFO("Thermal zone %d ('%s'): trip point %d crossed way up with %d °C\n",
+ tz_id, tz->name, trip_id, temp);
+
+ return 0;
+}
+
+static int trip_low(int tz_id, int trip_id, int temp, void *arg)
+{
+ struct thermal_data *td = arg;
+ struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id);
+
+ INFO("Thermal zone %d ('%s'): trip point %d crossed way down with %d °C\n",
+ tz_id, tz->name, trip_id, temp);
+
+ return 0;
+}
+
+static int trip_add(int tz_id, int trip_id, int type, int temp, int hyst, __maybe_unused void *arg)
+{
+ INFO("Trip point added %d: id=%d, type=%d, temp=%d, hyst=%d\n",
+ tz_id, trip_id, type, temp, hyst);
+
+ return 0;
+}
+
+static int trip_delete(int tz_id, int trip_id, __maybe_unused void *arg)
+{
+ INFO("Trip point deleted %d: id=%d\n", tz_id, trip_id);
+
+ return 0;
+}
+
+static int trip_change(int tz_id, int trip_id, int type, int temp,
+ int hyst, __maybe_unused void *arg)
+{
+ struct thermal_data *td = arg;
+ struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id);
+
+ INFO("Trip point changed %d: id=%d, type=%d, temp=%d, hyst=%d\n",
+ tz_id, trip_id, type, temp, hyst);
+
+ tz->trip[trip_id].type = type;
+ tz->trip[trip_id].temp = temp;
+ tz->trip[trip_id].hyst = hyst;
+
+ return 0;
+}
+
+static int cdev_add(const char *name, int cdev_id, int max_state, __maybe_unused void *arg)
+{
+ INFO("Cooling device '%s'/%d (max state=%d) added\n", name, cdev_id, max_state);
+
+ return 0;
+}
+
+static int cdev_delete(int cdev_id, __maybe_unused void *arg)
+{
+ INFO("Cooling device %d deleted", cdev_id);
+
+ return 0;
+}
+
+static int cdev_update(int cdev_id, int cur_state, __maybe_unused void *arg)
+{
+ INFO("cdev:%d state:%d\n", cdev_id, cur_state);
+
+ return 0;
+}
+
+static int gov_change(int tz_id, const char *name, __maybe_unused void *arg)
+{
+ struct thermal_data *td = arg;
+ struct thermal_zone *tz = thermal_zone_find_by_id(td->tz, tz_id);
+
+ INFO("%s: governor changed %s -> %s\n", tz->name, tz->governor, name);
+
+ strcpy(tz->governor, name);
+
+ return 0;
+}
+
+static struct thermal_ops ops = {
+ .events.tz_create = tz_create,
+ .events.tz_delete = tz_delete,
+ .events.tz_disable = tz_disable,
+ .events.tz_enable = tz_enable,
+ .events.trip_high = trip_high,
+ .events.trip_low = trip_low,
+ .events.trip_add = trip_add,
+ .events.trip_delete = trip_delete,
+ .events.trip_change = trip_change,
+ .events.cdev_add = cdev_add,
+ .events.cdev_delete = cdev_delete,
+ .events.cdev_update = cdev_update,
+ .events.gov_change = gov_change
+};
+
+static int thermal_event(__maybe_unused int fd, __maybe_unused void *arg)
+{
+ struct thermal_data *td = arg;
+
+ return thermal_events_handle(td->th, td);
+}
+
+static void usage(const char *cmd)
+{
+ printf("%s : A thermal monitoring engine based on notifications\n", cmd);
+ printf("Usage: %s [options]\n", cmd);
+ printf("\t-h, --help\t\tthis help\n");
+ printf("\t-d, --daemonize\n");
+ printf("\t-l <level>, --loglevel <level>\tlog level: ");
+ printf("DEBUG, INFO, NOTICE, WARN, ERROR\n");
+ printf("\t-s, --syslog\t\toutput to syslog\n");
+ printf("\n");
+ exit(0);
+}
+
+static int options_init(int argc, char *argv[], struct options *options)
+{
+ int opt;
+
+ struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "daemonize", no_argument, NULL, 'd' },
+ { "syslog", no_argument, NULL, 's' },
+ { "loglevel", required_argument, NULL, 'l' },
+ { 0, 0, 0, 0 }
+ };
+
+ while (1) {
+
+ int optindex = 0;
+
+ opt = getopt_long(argc, argv, "l:dhs", long_options, &optindex);
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'l':
+ options->loglevel = log_str2level(optarg);
+ break;
+ case 'd':
+ options->daemonize = 1;
+ break;
+ case 's':
+ options->logopt = TO_SYSLOG;
+ break;
+ case 'h':
+ usage(basename(argv[0]));
+ break;
+ default: /* '?' */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+enum {
+ THERMAL_ENGINE_SUCCESS = 0,
+ THERMAL_ENGINE_OPTION_ERROR,
+ THERMAL_ENGINE_DAEMON_ERROR,
+ THERMAL_ENGINE_LOG_ERROR,
+ THERMAL_ENGINE_THERMAL_ERROR,
+ THERMAL_ENGINE_MAINLOOP_ERROR,
+};
+
+int main(int argc, char *argv[])
+{
+ struct thermal_data td;
+ struct options options = {
+ .loglevel = LOG_INFO,
+ .logopt = TO_STDOUT,
+ };
+
+ if (options_init(argc, argv, &options)) {
+ ERROR("Usage: %s --help\n", argv[0]);
+ return THERMAL_ENGINE_OPTION_ERROR;
+ }
+
+ if (options.daemonize && daemon(0, 0)) {
+		ERROR("Failed to daemonize: %m\n");
+ return THERMAL_ENGINE_DAEMON_ERROR;
+ }
+
+ if (log_init(options.loglevel, basename(argv[0]), options.logopt)) {
+ ERROR("Failed to initialize logging facility\n");
+ return THERMAL_ENGINE_LOG_ERROR;
+ }
+
+ td.th = thermal_init(&ops);
+ if (!td.th) {
+ ERROR("Failed to initialize the thermal library\n");
+ return THERMAL_ENGINE_THERMAL_ERROR;
+ }
+
+ td.tz = thermal_zone_discover(td.th);
+ if (!td.tz) {
+ ERROR("No thermal zone available\n");
+ return THERMAL_ENGINE_THERMAL_ERROR;
+ }
+
+ for_each_thermal_zone(td.tz, show_tz, td.th);
+
+ if (mainloop_init()) {
+ ERROR("Failed to initialize the mainloop\n");
+ return THERMAL_ENGINE_MAINLOOP_ERROR;
+ }
+
+ if (mainloop_add(thermal_events_fd(td.th), thermal_event, &td)) {
+ ERROR("Failed to setup the mainloop\n");
+ return THERMAL_ENGINE_MAINLOOP_ERROR;
+ }
+
+ INFO("Waiting for thermal events ...\n");
+
+ if (mainloop(-1)) {
+ ERROR("Mainloop failed\n");
+ return THERMAL_ENGINE_MAINLOOP_ERROR;
+ }
+
+ return THERMAL_ENGINE_SUCCESS;
+}
diff --git a/tools/thermal/thermometer/Build b/tools/thermal/thermometer/Build
new file mode 100644
index 000000000000..1b96c159c3c8
--- /dev/null
+++ b/tools/thermal/thermometer/Build
@@ -0,0 +1 @@
+thermometer-y += thermometer.o
diff --git a/tools/thermal/thermometer/Makefile b/tools/thermal/thermometer/Makefile
new file mode 100644
index 000000000000..d8f8bc82fe3b
--- /dev/null
+++ b/tools/thermal/thermometer/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for thermal tools
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+# $(info Determined 'srctree' to be $(srctree))
+endif
+
+CFLAGS = -Wall -Wextra
+CFLAGS += -I$(srctree)/tools/thermal/lib
+
+LDFLAGS = -L$(srctree)/tools/thermal/lib
+LDFLAGS += -lthermal_tools
+LDFLAGS += -lconfig
+
+VERSION = 0.0.1
+TARGET=thermometer
+
+all: $(TARGET)
+%: %.c
+ $(CC) $(CFLAGS) -D VERSION=\"$(VERSION)\" -o $@ $^ $(LDFLAGS)
+
+clean:
+ $(RM) $(TARGET)
diff --git a/tools/thermal/thermometer/thermometer.8 b/tools/thermal/thermometer/thermometer.8
new file mode 100644
index 000000000000..d090fbca4cba
--- /dev/null
+++ b/tools/thermal/thermometer/thermometer.8
@@ -0,0 +1,92 @@
+.TH THERMOMETER 8
+# SPDX-License-Identifier: GPL-2.0
+.SH NAME
+\fBthermometer\fP - A thermal profiling tool
+
+.SH SYNOPSIS
+.ft B
+.B thermometer
+.RB [ options ]
+.RB [ command ]
+.br
+.SH DESCRIPTION
+\fBthermometer \fP captures the thermal zone temperatures at a
+specified sampling period. It is optimized to keep the overhead of the
+temperature acquisition as low as possible, in order not to disrupt
+the running application being profiled.
+
+This low overhead also allows high-rate sampling of the temperature,
+which may be necessary to spot overshoots and undershoots.
+
+If no configuration file is specified, then all the thermal zones are
+monitored at 4 Hz, i.e. every 250 ms. A configuration file specifies
+the thermal zone names and the desired sampling period. A thermal zone
+name can be a regular expression to specify a group of thermal zones.
+
+The samples of the different thermal zones are written into separate
+files named after the thermal zone. A postfix can be specified to
+identify them, for example for a specific scenario. The output
+directory can be specified in addition.
+
+Without any parameters, \fBthermometer \fP captures all the thermal
+zone temperatures every 250 ms and writes the capture files, postfixed
+with the current date, to the current directory.
+
+If a running \fBduration\fP or a \fBcommand\fP is specified, the
+capture ends when the duration expires or when the command finishes,
+whichever comes first. The \fBduration\fP can be specified alone, as
+can the \fBcommand\fP. If neither is specified, the capture continues
+indefinitely until interrupted by \fBSIGINT\fP or \fBSIGQUIT\fP.
+.PP
+
+.SS Options
+.PP
+The \fB-h, --help\fP option shows a short usage help
+.PP
+The \fB-o <dir>, --output <dir>\fP option defines the output directory to put the
+sampling files
+.PP
+The \fB-c <config>, --config <config>\fP option specifies the configuration file to use
+.PP
+The \fB-d <seconds>, --duration <seconds>\fP option specifies the duration of the capture
+.PP
+The \fB-l <loglevel>, --loglevel <loglevel>\fP option sets the loglevel [DEBUG,INFO,NOTICE,WARN,ERROR]
+.PP
+The \fB-p <string>, --postfix <string>\fP option appends \fBstring\fP at the end of the capture filenames
+.PP
+The \fB-s, --syslog\fP option sets the output to syslog, default is \fBstdout\fP
+.PP
+The \fB-w, --overwrite\fP option overwrites the output files if they exist
+.PP
+
+.PP
+
+.SS "Exit status:"
+.TP
+0
+if OK,
+.TP
+1
+Error with the options specified as parameters
+.TP
+2
+Error when configuring the logging facility
+.TP
+3
+Error when configuring the time
+.TP
+4
+Error in the initialization routine
+.TP
+5
+Error during the runtime
+
+.SH Capture file format
+
+Every file contains two columns. The first one is the uptime timestamp
+in milliseconds, so that a sample can be related to a point in time
+since the system started up when investigating a thermal event. The
+second one is the temperature in millidegrees Celsius. The first line
+contains the label of each column.
+
+.SH AUTHOR
+Daniel Lezcano <daniel.lezcano@kernel.org>
diff --git a/tools/thermal/thermometer/thermometer.c b/tools/thermal/thermometer/thermometer.c
new file mode 100644
index 000000000000..1a87a0a77f9f
--- /dev/null
+++ b/tools/thermal/thermometer/thermometer.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#define _GNU_SOURCE
+#include <dirent.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <regex.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/signalfd.h>
+#include <sys/timerfd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+#include <linux/thermal.h>
+
+#include <libconfig.h>
+#include "thermal-tools.h"
+
+#define CLASS_THERMAL "/sys/class/thermal"
+
+enum {
+ THERMOMETER_SUCCESS = 0,
+ THERMOMETER_OPTION_ERROR,
+ THERMOMETER_LOG_ERROR,
+ THERMOMETER_CONFIG_ERROR,
+ THERMOMETER_TIME_ERROR,
+ THERMOMETER_INIT_ERROR,
+ THERMOMETER_RUNTIME_ERROR
+};
+
+struct options {
+ int loglvl;
+ int logopt;
+ int overwrite;
+ int duration;
+ const char *config;
+ char postfix[PATH_MAX];
+ char output[PATH_MAX];
+};
+
+struct tz_regex {
+ regex_t regex;
+ int polling;
+};
+
+struct configuration {
+ struct tz_regex *tz_regex;
+ int nr_tz_regex;
+
+};
+
+struct tz {
+ FILE *file_out;
+ int fd_temp;
+ int fd_timer;
+ int polling;
+ const char *name;
+};
+
+struct thermometer {
+ struct tz *tz;
+ int nr_tz;
+};
+
+static struct tz_regex *configuration_tz_match(const char *expr,
+ struct configuration *config)
+{
+ int i;
+
+ for (i = 0; i < config->nr_tz_regex; i++) {
+
+ if (!regexec(&config->tz_regex[i].regex, expr, 0, NULL, 0))
+ return &config->tz_regex[i];
+ }
+
+ return NULL;
+}
+
+static int configuration_default_init(struct configuration *config)
+{
+ config->tz_regex = realloc(config->tz_regex, sizeof(*config->tz_regex) *
+ (config->nr_tz_regex + 1));
+
+ if (regcomp(&config->tz_regex[config->nr_tz_regex].regex, ".*",
+ REG_NOSUB | REG_EXTENDED)) {
+ ERROR("Invalid regular expression\n");
+ return -1;
+ }
+
+ config->tz_regex[config->nr_tz_regex].polling = 250;
+ config->nr_tz_regex = 1;
+
+ return 0;
+}
+
+static int configuration_init(const char *path, struct configuration *config)
+{
+ config_t cfg;
+
+ config_setting_t *tz;
+ int i, length;
+
+ if (path && access(path, F_OK)) {
+ ERROR("'%s' is not accessible\n", path);
+ return -1;
+ }
+
+ if (!path && !config->nr_tz_regex) {
+ INFO("No thermal zones configured, using wildcard for all of them\n");
+ return configuration_default_init(config);
+ }
+
+ config_init(&cfg);
+
+ if (!config_read_file(&cfg, path)) {
+ ERROR("Failed to parse %s:%d - %s\n", config_error_file(&cfg),
+ config_error_line(&cfg), config_error_text(&cfg));
+
+ return -1;
+ }
+
+ tz = config_lookup(&cfg, "thermal-zones");
+ if (!tz) {
+ ERROR("No thermal zone configured to be monitored\n");
+ return -1;
+ }
+
+ length = config_setting_length(tz);
+
+	INFO("Found %d thermal zone regular expression(s)\n", length);
+
+ for (i = 0; i < length; i++) {
+
+ config_setting_t *node;
+ const char *name;
+ int polling;
+
+ node = config_setting_get_elem(tz, i);
+ if (!node) {
+ ERROR("Missing node name '%d'\n", i);
+ return -1;
+ }
+
+ if (!config_setting_lookup_string(node, "name", &name)) {
+ ERROR("Thermal zone name not found\n");
+ return -1;
+ }
+
+ if (!config_setting_lookup_int(node, "polling", &polling)) {
+			ERROR("Polling value not found\n");
+ return -1;
+ }
+
+ config->tz_regex = realloc(config->tz_regex, sizeof(*config->tz_regex) *
+ (config->nr_tz_regex + 1));
+
+ if (regcomp(&config->tz_regex[config->nr_tz_regex].regex, name,
+ REG_NOSUB | REG_EXTENDED)) {
+ ERROR("Invalid regular expression '%s'\n", name);
+ continue;
+ }
+
+ config->tz_regex[config->nr_tz_regex].polling = polling;
+ config->nr_tz_regex++;
+
+ INFO("Thermal zone regular expression '%s' with polling %d\n",
+ name, polling);
+ }
+
+ return 0;
+}
+
+static void usage(const char *cmd)
+{
+ printf("%s Version: %s\n", cmd, VERSION);
+ printf("Usage: %s [options]\n", cmd);
+ printf("\t-h, --help\t\tthis help\n");
+ printf("\t-o, --output <dir>\toutput directory for temperature capture\n");
+ printf("\t-c, --config <file>\tconfiguration file\n");
+ printf("\t-d, --duration <seconds>\tcapture duration\n");
+ printf("\t-l, --loglevel <level>\tlog level: ");
+ printf("DEBUG, INFO, NOTICE, WARN, ERROR\n");
+	printf("\t-p, --postfix <string>\tpostfix to be appended at the end of the files\n");
+ printf("\t-s, --syslog\t\toutput to syslog\n");
+ printf("\t-w, --overwrite\t\toverwrite the temperature capture files if they exist\n");
+ printf("\n");
+ exit(0);
+}
+
+static int options_init(int argc, char *argv[], struct options *options)
+{
+ int opt;
+ time_t now = time(NULL);
+
+ struct option long_options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "config", required_argument, NULL, 'c' },
+ { "duration", required_argument, NULL, 'd' },
+ { "loglevel", required_argument, NULL, 'l' },
+ { "postfix", required_argument, NULL, 'p' },
+ { "output", required_argument, NULL, 'o' },
+ { "syslog", required_argument, NULL, 's' },
+ { "overwrite", no_argument, NULL, 'w' },
+ { 0, 0, 0, 0 }
+ };
+
+ strftime(options->postfix, sizeof(options->postfix),
+ "-%Y-%m-%d_%H:%M:%S", gmtime(&now));
+
+ while (1) {
+
+ int optindex = 0;
+
+ opt = getopt_long(argc, argv, "ho:c:d:l:p:sw", long_options, &optindex);
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'c':
+ options->config = optarg;
+ break;
+ case 'd':
+ options->duration = atoi(optarg) * 1000;
+ break;
+ case 'l':
+ options->loglvl = log_str2level(optarg);
+ break;
+ case 'h':
+ usage(basename(argv[0]));
+ break;
+ case 'p':
+ strcpy(options->postfix, optarg);
+ break;
+ case 'o':
+ strcpy(options->output, optarg);
+ break;
+ case 's':
+ options->logopt = TO_SYSLOG;
+ break;
+ case 'w':
+ options->overwrite = 1;
+ break;
+ default: /* '?' */
+ ERROR("Usage: %s --help\n", argv[0]);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int thermometer_add_tz(const char *path, const char *name, int polling,
+ struct thermometer *thermometer)
+{
+ int fd;
+ char tz_path[PATH_MAX];
+
+ sprintf(tz_path, CLASS_THERMAL"/%s/temp", path);
+
+ fd = open(tz_path, O_RDONLY);
+ if (fd < 0) {
+ ERROR("Failed to open '%s': %m\n", tz_path);
+ return -1;
+ }
+
+ thermometer->tz = realloc(thermometer->tz,
+ sizeof(*thermometer->tz) * (thermometer->nr_tz + 1));
+ if (!thermometer->tz) {
+ ERROR("Failed to allocate thermometer->tz\n");
+ return -1;
+ }
+
+ thermometer->tz[thermometer->nr_tz].fd_temp = fd;
+ thermometer->tz[thermometer->nr_tz].name = strdup(name);
+ thermometer->tz[thermometer->nr_tz].polling = polling;
+ thermometer->nr_tz++;
+
+ INFO("Added thermal zone '%s->%s (polling:%d)'\n", path, name, polling);
+
+ return 0;
+}
+
+static int thermometer_init(struct configuration *config,
+ struct thermometer *thermometer)
+{
+ DIR *dir;
+ struct dirent *dirent;
+ struct tz_regex *tz_regex;
+ const char *tz_dirname = "thermal_zone";
+
+ if (mainloop_init()) {
+ ERROR("Failed to start mainloop\n");
+ return -1;
+ }
+
+ dir = opendir(CLASS_THERMAL);
+ if (!dir) {
+ ERROR("failed to open '%s'\n", CLASS_THERMAL);
+ return -1;
+ }
+
+ while ((dirent = readdir(dir))) {
+ char tz_type[THERMAL_NAME_LENGTH];
+ char tz_path[PATH_MAX];
+ FILE *tz_file;
+
+ if (strncmp(dirent->d_name, tz_dirname, strlen(tz_dirname)))
+ continue;
+
+ sprintf(tz_path, CLASS_THERMAL"/%s/type", dirent->d_name);
+
+ tz_file = fopen(tz_path, "r");
+ if (!tz_file) {
+ ERROR("Failed to open '%s': %m", tz_path);
+ continue;
+ }
+
+ fscanf(tz_file, "%s", tz_type);
+
+ fclose(tz_file);
+
+ tz_regex = configuration_tz_match(tz_type, config);
+ if (!tz_regex)
+ continue;
+
+ if (thermometer_add_tz(dirent->d_name, tz_type,
+ tz_regex->polling, thermometer))
+ continue;
+ }
+
+ closedir(dir);
+
+ return 0;
+}
+
+static int timer_temperature_callback(int fd, void *arg)
+{
+ struct tz *tz = arg;
+ char buf[16] = { 0 };
+
+ pread(tz->fd_temp, buf, sizeof(buf), 0);
+
+ fprintf(tz->file_out, "%ld %s", getuptimeofday_ms(), buf);
+
+ read(fd, buf, sizeof(buf));
+
+ return 0;
+}
+
+static int thermometer_start(struct thermometer *thermometer,
+ struct options *options)
+{
+ struct itimerspec timer_it = { 0 };
+ char *path;
+ FILE *f;
+ int i;
+
+ INFO("Capturing %d thermal zone(s) temperature...\n", thermometer->nr_tz);
+
+ if (access(options->output, F_OK) && mkdir(options->output, 0700)) {
+ ERROR("Failed to create directory '%s'\n", options->output);
+ return -1;
+ }
+
+ for (i = 0; i < thermometer->nr_tz; i++) {
+
+ asprintf(&path, "%s/%s%s", options->output,
+ thermometer->tz[i].name, options->postfix);
+
+ if (!options->overwrite && !access(path, F_OK)) {
+ ERROR("'%s' already exists\n", path);
+ return -1;
+ }
+
+ f = fopen(path, "w");
+ if (!f) {
+ ERROR("Failed to create '%s':%m\n", path);
+ return -1;
+ }
+
+ fprintf(f, "timestamp(ms) %s(°mC)\n", thermometer->tz[i].name);
+
+ thermometer->tz[i].file_out = f;
+
+ DEBUG("Created '%s' file for thermal zone '%s'\n", path, thermometer->tz[i].name);
+
+ /*
+ * Create polling timer
+ */
+ thermometer->tz[i].fd_timer = timerfd_create(CLOCK_MONOTONIC, 0);
+ if (thermometer->tz[i].fd_timer < 0) {
+ ERROR("Failed to create timer for '%s': %m\n",
+ thermometer->tz[i].name);
+ return -1;
+ }
+
+ DEBUG("Watching '%s' every %d ms\n",
+ thermometer->tz[i].name, thermometer->tz[i].polling);
+
+ timer_it.it_interval = timer_it.it_value =
+ msec_to_timespec(thermometer->tz[i].polling);
+
+ if (timerfd_settime(thermometer->tz[i].fd_timer, 0,
+ &timer_it, NULL) < 0)
+ return -1;
+
+ if (mainloop_add(thermometer->tz[i].fd_timer,
+ timer_temperature_callback,
+ &thermometer->tz[i]))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int thermometer_execute(int argc, char *argv[], char *const envp[], pid_t *pid)
+{
+ if (!argc)
+ return 0;
+
+ *pid = fork();
+ if (*pid < 0) {
+ ERROR("Failed to fork process: %m");
+ return -1;
+ }
+
+ if (!(*pid)) {
+ execvpe(argv[0], argv, envp);
+ exit(1);
+ }
+
+ return 0;
+}
+
+static int kill_process(__maybe_unused int fd, void *arg)
+{
+ pid_t pid = *(pid_t *)arg;
+
+ if (kill(pid, SIGTERM))
+		ERROR("Failed to send SIGTERM signal to '%d': %m\n", pid);
+ else if (waitpid(pid, NULL, 0))
+		ERROR("Failed to wait pid '%d': %m\n", pid);
+
+ mainloop_exit();
+
+ return 0;
+}
+
+static int exit_mainloop(__maybe_unused int fd, __maybe_unused void *arg)
+{
+ mainloop_exit();
+ return 0;
+}
+
+static int thermometer_wait(struct options *options, pid_t pid)
+{
+ int fd;
+ sigset_t mask;
+
+ /*
+ * If there is a duration specified, we will exit the mainloop
+ * and gracefully close all the files which will flush the
+ * file system cache
+ */
+ if (options->duration) {
+ struct itimerspec timer_it = { 0 };
+
+ timer_it.it_value = msec_to_timespec(options->duration);
+
+ fd = timerfd_create(CLOCK_MONOTONIC, 0);
+ if (fd < 0) {
+ ERROR("Failed to create duration timer: %m\n");
+ return -1;
+ }
+
+ if (timerfd_settime(fd, 0, &timer_it, NULL)) {
+ ERROR("Failed to set timer time: %m\n");
+ return -1;
+ }
+
+ if (mainloop_add(fd, pid < 0 ? exit_mainloop : kill_process, &pid)) {
+ ERROR("Failed to set timer exit mainloop callback\n");
+ return -1;
+ }
+ }
+
+ /*
+ * We want to catch any keyboard interrupt, as well as child
+ * signals if any in order to exit properly
+ */
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGINT);
+ sigaddset(&mask, SIGQUIT);
+ sigaddset(&mask, SIGCHLD);
+
+ if (sigprocmask(SIG_BLOCK, &mask, NULL)) {
+ ERROR("Failed to set sigprocmask: %m\n");
+ return -1;
+ }
+
+ fd = signalfd(-1, &mask, 0);
+ if (fd < 0) {
+ ERROR("Failed to set the signalfd: %m\n");
+ return -1;
+ }
+
+ if (mainloop_add(fd, exit_mainloop, NULL)) {
+ ERROR("Failed to set timer exit mainloop callback\n");
+ return -1;
+ }
+
+ return mainloop(-1);
+}
+
+static int thermometer_stop(struct thermometer *thermometer)
+{
+ int i;
+
+ INFO("Closing/flushing output files\n");
+
+ for (i = 0; i < thermometer->nr_tz; i++)
+ fclose(thermometer->tz[i].file_out);
+
+ return 0;
+}
+
+int main(int argc, char *argv[], char *const envp[])
+{
+ struct options options = {
+ .loglvl = LOG_DEBUG,
+ .logopt = TO_STDOUT,
+ .output = ".",
+ };
+ struct configuration config = { 0 };
+ struct thermometer thermometer = { 0 };
+
+ pid_t pid = -1;
+
+ if (options_init(argc, argv, &options))
+ return THERMOMETER_OPTION_ERROR;
+
+ if (log_init(options.loglvl, argv[0], options.logopt))
+ return THERMOMETER_LOG_ERROR;
+
+ if (configuration_init(options.config, &config))
+ return THERMOMETER_CONFIG_ERROR;
+
+ if (uptimeofday_init())
+ return THERMOMETER_TIME_ERROR;
+
+ if (thermometer_init(&config, &thermometer))
+ return THERMOMETER_INIT_ERROR;
+
+ if (thermometer_start(&thermometer, &options))
+ return THERMOMETER_RUNTIME_ERROR;
+
+ if (thermometer_execute(argc - optind, &argv[optind], envp, &pid))
+ return THERMOMETER_RUNTIME_ERROR;
+
+ if (thermometer_wait(&options, pid))
+ return THERMOMETER_RUNTIME_ERROR;
+
+ if (thermometer_stop(&thermometer))
+ return THERMOMETER_RUNTIME_ERROR;
+
+ return THERMOMETER_SUCCESS;
+}
diff --git a/tools/thermal/thermometer/thermometer.conf b/tools/thermal/thermometer/thermometer.conf
new file mode 100644
index 000000000000..02c6dab3b1b3
--- /dev/null
+++ b/tools/thermal/thermometer/thermometer.conf
@@ -0,0 +1,5 @@
+
+thermal-zones = (
+ { name = "cpu[0-9]-thermal";
+ polling = 100; }
+ )
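The file is parsed with libconfig; a reduced sketch of how a list like the one above can be walked, mirroring configuration_init() in thermometer.c:

#include <stdio.h>
#include <libconfig.h>

int main(void)
{
	config_t cfg;
	config_setting_t *tz;
	int i;

	config_init(&cfg);
	if (!config_read_file(&cfg, "thermometer.conf"))
		return 1;

	tz = config_lookup(&cfg, "thermal-zones");
	for (i = 0; tz && i < config_setting_length(tz); i++) {
		config_setting_t *node = config_setting_get_elem(tz, i);
		const char *name;
		int polling;

		if (config_setting_lookup_string(node, "name", &name) &&
		    config_setting_lookup_int(node, "polling", &polling))
			printf("zone regex '%s', polling %d ms\n", name, polling);
	}

	config_destroy(&cfg);
	return 0;
}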
diff --git a/tools/thermal/tmon/pid.c b/tools/thermal/tmon/pid.c
index 296f69c00c57..da20088285bd 100644
--- a/tools/thermal/tmon/pid.c
+++ b/tools/thermal/tmon/pid.c
@@ -27,7 +27,7 @@
/**************************************************************************
* PID (Proportional-Integral-Derivative) controller is commonly used in
- * linear control system, consider the the process.
+ * linear control system, consider the process.
* G(s) = U(s)/E(s)
* kp = proportional gain
* ki = integral gain
diff --git a/tools/thermal/tmon/tmon.h b/tools/thermal/tmon/tmon.h
index c9066ec104dd..44d16d778f04 100644
--- a/tools/thermal/tmon/tmon.h
+++ b/tools/thermal/tmon/tmon.h
@@ -27,6 +27,9 @@
#define NR_LINES_TZDATA 1
#define TMON_LOG_FILE "/var/tmp/tmon.log"
+#include <sys/time.h>
+#include <pthread.h>
+
extern unsigned long ticktime;
extern double time_elapsed;
extern unsigned long target_temp_user;
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 11fb417abb42..3822f4ea5f49 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -23,6 +23,7 @@ $(call allow-override,LD_SO_CONF_PATH,/etc/ld.so.conf.d/)
$(call allow-override,LDCONFIG,ldconfig)
INSTALL = install
+MKDIR = mkdir
FOPTS := -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \
-fasynchronous-unwind-tables -fstack-clash-protection
WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized
@@ -31,7 +32,7 @@ TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs)
CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS)
LDFLAGS := -ggdb
-LIBS := $$($(PKG_CONFIG) --libs libtracefs) -lprocps
+LIBS := $$($(PKG_CONFIG) --libs libtracefs)
SRC := $(wildcard src/*.c)
HDR := $(wildcard src/*.h)
@@ -57,6 +58,41 @@ else
DOCSRC = $(SRCTREE)/../../../Documentation/tools/rtla/
endif
+LIBTRACEEVENT_MIN_VERSION = 1.5
+LIBTRACEFS_MIN_VERSION = 1.3
+
+TEST_LIBTRACEEVENT = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEEVENT_MIN_VERSION) libtraceevent > /dev/null 2>&1 || echo n")
+ifeq ("$(TEST_LIBTRACEEVENT)", "n")
+.PHONY: warning_traceevent
+warning_traceevent:
+ @echo "********************************************"
+ @echo "** NOTICE: libtraceevent version $(LIBTRACEEVENT_MIN_VERSION) or higher not found"
+ @echo "**"
+ @echo "** Consider installing the latest libtraceevent from your"
+ @echo "** distribution, e.g., 'dnf install libtraceevent' on Fedora,"
+ @echo "** or from source:"
+ @echo "**"
+ @echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtraceevent.git/ "
+ @echo "**"
+ @echo "********************************************"
+endif
+
+TEST_LIBTRACEFS = $(shell sh -c "$(PKG_CONFIG) --atleast-version $(LIBTRACEFS_MIN_VERSION) libtracefs > /dev/null 2>&1 || echo n")
+ifeq ("$(TEST_LIBTRACEFS)", "n")
+.PHONY: warning_tracefs
+warning_tracefs:
+ @echo "********************************************"
+ @echo "** NOTICE: libtracefs version $(LIBTRACEFS_MIN_VERSION) or higher not found"
+ @echo "**"
+ @echo "** Consider installing the latest libtracefs from your"
+ @echo "** distribution, e.g., 'dnf install libtracefs' on Fedora,"
+ @echo "** or from source:"
+ @echo "**"
+ @echo "** https://git.kernel.org/pub/scm/libs/libtrace/libtracefs.git/ "
+ @echo "**"
+ @echo "********************************************"
+endif
+
.PHONY: all
all: rtla
@@ -68,7 +104,7 @@ static: $(OBJ)
.PHONY: install
install: doc_install
- $(INSTALL) -d -m 755 $(DESTDIR)$(BINDIR)
+ $(MKDIR) -p $(DESTDIR)$(BINDIR)
$(INSTALL) rtla -m 755 $(DESTDIR)$(BINDIR)
$(STRIP) $(DESTDIR)$(BINDIR)/rtla
@test ! -f $(DESTDIR)$(BINDIR)/osnoise || rm $(DESTDIR)$(BINDIR)/osnoise
diff --git a/tools/tracing/rtla/README.txt b/tools/tracing/rtla/README.txt
index 6c88446f7e74..4af3fd40f171 100644
--- a/tools/tracing/rtla/README.txt
+++ b/tools/tracing/rtla/README.txt
@@ -1,19 +1,16 @@
RTLA: Real-Time Linux Analysis tools
-The rtla is a meta-tool that includes a set of commands that
-aims to analyze the real-time properties of Linux. But, instead of
-testing Linux as a black box, rtla leverages kernel tracing
-capabilities to provide precise information about the properties
-and root causes of unexpected results.
+The rtla meta-tool includes a set of commands that aims to analyze
+the real-time properties of Linux. Instead of testing Linux as a black box,
+rtla leverages kernel tracing capabilities to provide precise information
+about the properties and root causes of unexpected results.
Installing RTLA
-RTLA depends on some libraries and tools. More precisely, it depends on the
-following libraries:
+RTLA depends on the following libraries and tools:
- libtracefs
- libtraceevent
- - procps
It also depends on python3-docutils to compile man pages.
diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
index b4380d45cacd..5d7ea479ac89 100644
--- a/tools/tracing/rtla/src/osnoise_hist.c
+++ b/tools/tracing/rtla/src/osnoise_hist.c
@@ -809,7 +809,7 @@ int osnoise_hist_main(int argc, char *argv[])
retval = set_comm_sched_attr("osnoise/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_hist;
+ goto out_free;
}
}
@@ -819,7 +819,7 @@ int osnoise_hist_main(int argc, char *argv[])
record = osnoise_init_trace_tool("osnoise");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_hist;
+ goto out_free;
}
if (params->events) {
@@ -869,6 +869,7 @@ int osnoise_hist_main(int argc, char *argv[])
out_hist:
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
osnoise_free_histogram(tool->data);
out_destroy:
osnoise_destroy_tool(record);
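
The label retargeting above is the substance of this fix: record has not been
created yet at those failure points (osnoise_init_trace_tool() runs later), so
jumping to out_hist would pass a pointer derived from a NULL record into
trace_events_destroy(). Routing those failures to the new out_free label tears
down only what already exists. A minimal sketch of the layered-cleanup idiom,
with illustrative names rather than the rtla code:

```c
/* Sketch of the layered goto-cleanup idiom behind the retargeted
 * labels above: each label tears down one more resource, so a
 * failure must enter the chain below the resources that were never
 * created. */
#include <stdlib.h>

struct rec { char *trace; };

static int run(int fail_early)
{
	char *hist = malloc(64);	/* always created first */
	struct rec *record = NULL;	/* created later, maybe never */

	if (!hist)
		return 1;

	if (fail_early)
		goto out_free;	/* record == NULL: out_rec would crash */

	record = calloc(1, sizeof(*record));
	if (!record)
		goto out_free;
	record->trace = malloc(16);

	goto out_rec;		/* normal exit runs the full chain */
out_rec:
	free(record->trace);	/* only safe once record exists */
	free(record);
out_free:
	free(hist);
	return 0;
}

int main(void)
{
	run(1);		/* early failure path */
	run(0);		/* full teardown path */
	return 0;
}
```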
diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
index 72c2fd6ce005..76479bfb2922 100644
--- a/tools/tracing/rtla/src/osnoise_top.c
+++ b/tools/tracing/rtla/src/osnoise_top.c
@@ -572,7 +572,7 @@ int osnoise_top_main(int argc, char **argv)
retval = osnoise_top_apply_config(tool, params);
if (retval) {
err_msg("Could not apply config\n");
- goto out_top;
+ goto out_free;
}
trace = &tool->trace;
@@ -580,14 +580,14 @@ int osnoise_top_main(int argc, char **argv)
retval = enable_osnoise(trace);
if (retval) {
err_msg("Failed to enable osnoise tracer\n");
- goto out_top;
+ goto out_free;
}
if (params->set_sched) {
retval = set_comm_sched_attr("osnoise/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_top;
+ goto out_free;
}
}
@@ -597,7 +597,7 @@ int osnoise_top_main(int argc, char **argv)
record = osnoise_init_trace_tool("osnoise");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_top;
+ goto out_free;
}
if (params->events) {
@@ -649,6 +649,7 @@ int osnoise_top_main(int argc, char **argv)
out_top:
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
osnoise_free_top(tool->data);
osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index dc908126c610..f3ec628f5e51 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -821,7 +821,7 @@ int timerlat_hist_main(int argc, char *argv[])
retval = timerlat_hist_apply_config(tool, params);
if (retval) {
err_msg("Could not apply config\n");
- goto out_hist;
+ goto out_free;
}
trace = &tool->trace;
@@ -829,14 +829,14 @@ int timerlat_hist_main(int argc, char *argv[])
retval = enable_timerlat(trace);
if (retval) {
err_msg("Failed to enable timerlat tracer\n");
- goto out_hist;
+ goto out_free;
}
if (params->set_sched) {
retval = set_comm_sched_attr("timerlat/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_hist;
+ goto out_free;
}
}
@@ -844,7 +844,7 @@ int timerlat_hist_main(int argc, char *argv[])
dma_latency_fd = set_cpu_dma_latency(params->dma_latency);
if (dma_latency_fd < 0) {
err_msg("Could not set /dev/cpu_dma_latency.\n");
- goto out_hist;
+ goto out_free;
}
}
@@ -854,7 +854,7 @@ int timerlat_hist_main(int argc, char *argv[])
record = osnoise_init_trace_tool("timerlat");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_hist;
+ goto out_free;
}
if (params->events) {
@@ -904,6 +904,7 @@ out_hist:
close(dma_latency_fd);
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
timerlat_free_histogram(tool->data);
osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 1f754c3df53f..35452a1d45e9 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -612,7 +612,7 @@ int timerlat_top_main(int argc, char *argv[])
retval = timerlat_top_apply_config(top, params);
if (retval) {
err_msg("Could not apply config\n");
- goto out_top;
+ goto out_free;
}
trace = &top->trace;
@@ -620,14 +620,14 @@ int timerlat_top_main(int argc, char *argv[])
retval = enable_timerlat(trace);
if (retval) {
err_msg("Failed to enable timerlat tracer\n");
- goto out_top;
+ goto out_free;
}
if (params->set_sched) {
retval = set_comm_sched_attr("timerlat/", &params->sched_param);
if (retval) {
err_msg("Failed to set sched parameters\n");
- goto out_top;
+ goto out_free;
}
}
@@ -635,7 +635,7 @@ int timerlat_top_main(int argc, char *argv[])
dma_latency_fd = set_cpu_dma_latency(params->dma_latency);
if (dma_latency_fd < 0) {
err_msg("Could not set /dev/cpu_dma_latency.\n");
- goto out_top;
+ goto out_free;
}
}
@@ -645,7 +645,7 @@ int timerlat_top_main(int argc, char *argv[])
record = osnoise_init_trace_tool("timerlat");
if (!record) {
err_msg("Failed to enable the trace instance\n");
- goto out_top;
+ goto out_free;
}
if (params->events) {
@@ -699,6 +699,7 @@ out_top:
close(dma_latency_fd);
trace_events_destroy(&record->trace, params->events);
params->events = NULL;
+out_free:
timerlat_free_top(top->data);
osnoise_destroy_tool(record);
osnoise_destroy_tool(top);
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index da2b590edaed..5352167a1e75 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -3,7 +3,7 @@
* Copyright (C) 2021 Red Hat Inc, Daniel Bristot de Oliveira <bristot@kernel.org>
*/
-#include <proc/readproc.h>
+#include <dirent.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
@@ -255,50 +255,114 @@ int __set_sched_attr(int pid, struct sched_attr *attr)
retval = sched_setattr(pid, attr, flags);
if (retval < 0) {
- err_msg("boost_with_deadline failed to boost pid %d: %s\n",
+ err_msg("Failed to set sched attributes to the pid %d: %s\n",
pid, strerror(errno));
return 1;
}
return 0;
}
+
+/*
+ * procfs_is_workload_pid - check if a procfs entry contains a comm_prefix* comm
+ *
+ * Check if the procfs entry is a directory of a process, and then check if the
+ * process has a comm with the prefix set in char *comm_prefix. As the
+ * current users of this function only check for kernel threads, there is no
+ * need to check for the threads for the process.
+ *
+ * Return: True if the proc_entry contains a comm file with comm_prefix*.
+ * Otherwise returns false.
+ */
+static int procfs_is_workload_pid(const char *comm_prefix, struct dirent *proc_entry)
+{
+ char buffer[MAX_PATH];
+ int comm_fd, retval;
+ char *t_name;
+
+ if (proc_entry->d_type != DT_DIR)
+ return 0;
+
+ if (*proc_entry->d_name == '.')
+ return 0;
+
+ /* check if the string is a pid */
+ for (t_name = proc_entry->d_name; *t_name; t_name++) {
+ if (!isdigit(*t_name))
+ break;
+ }
+
+ if (*t_name != '\0')
+ return 0;
+
+ snprintf(buffer, MAX_PATH, "/proc/%s/comm", proc_entry->d_name);
+ comm_fd = open(buffer, O_RDONLY);
+ if (comm_fd < 0)
+ return 0;
+
+ memset(buffer, 0, MAX_PATH);
+ retval = read(comm_fd, buffer, MAX_PATH);
+
+ close(comm_fd);
+
+ if (retval <= 0)
+ return 0;
+
+ retval = strncmp(comm_prefix, buffer, strlen(comm_prefix));
+ if (retval)
+ return 0;
+
+ /* comm already has a \n */
+ debug_msg("Found workload pid:%s comm:%s", proc_entry->d_name, buffer);
+
+ return 1;
+}
+
/*
- * set_comm_sched_attr - set sched params to threads starting with char *comm
+ * set_comm_sched_attr - set sched params to threads starting with char *comm_prefix
*
- * This function uses procps to list the currently running threads and then
- * set the sched_attr *attr to the threads that start with char *comm. It is
+ * This function uses procfs to list the currently running threads and then set the
+ * sched_attr *attr to the threads that start with char *comm_prefix. It is
* mainly used to set the priority to the kernel threads created by the
* tracers.
*/
-int set_comm_sched_attr(const char *comm, struct sched_attr *attr)
+int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr)
{
- int flags = PROC_FILLCOM | PROC_FILLSTAT;
- PROCTAB *ptp;
- proc_t task;
+ struct dirent *proc_entry;
+ DIR *procfs;
int retval;
- ptp = openproc(flags);
- if (!ptp) {
- err_msg("error openproc()\n");
- return -ENOENT;
+ if (strlen(comm_prefix) >= MAX_PATH) {
+ err_msg("Command prefix is too long: %d < strlen(%s)\n",
+ MAX_PATH, comm_prefix);
+ return 1;
}
- memset(&task, 0, sizeof(task));
+ procfs = opendir("/proc");
+ if (!procfs) {
+ err_msg("Could not open procfs\n");
+ return 1;
+ }
- while (readproc(ptp, &task)) {
- retval = strncmp(comm, task.cmd, strlen(comm));
- if (retval)
+ while ((proc_entry = readdir(procfs))) {
+
+ retval = procfs_is_workload_pid(comm_prefix, proc_entry);
+ if (!retval)
continue;
- retval = __set_sched_attr(task.tid, attr);
- if (retval)
+
+ /* procfs_is_workload_pid confirmed it is a pid */
+ retval = __set_sched_attr(atoi(proc_entry->d_name), attr);
+ if (retval) {
+ err_msg("Error setting sched attributes for pid:%s\n", proc_entry->d_name);
goto out_err;
- }
+ }
- closeproc(ptp);
+ debug_msg("Set sched attributes for pid:%s\n", proc_entry->d_name);
+ }
return 0;
out_err:
- closeproc(ptp);
+ closedir(procfs);
return 1;
}
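
For reference, the rewritten set_comm_sched_attr() above applies the
attributes through __set_sched_attr(), i.e. the sched_setattr(2) system call,
for which glibc historically provides no wrapper; rtla declares its own
struct sched_attr (visible in the utils.h hunk below). A self-contained
sketch of the raw syscall, assuming the struct layout documented in the
sched_setattr(2) man page; SCHED_FIFO priority 1 on the calling thread is
only an example and requires suitable privileges.

```c
/* Sketch: invoking sched_setattr(2) via syscall(2); the struct is
 * declared by hand, with the layout from the sched_setattr(2) man
 * page. */
#include <stdio.h>
#include <stdint.h>
#include <sched.h>		/* SCHED_FIFO */
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;	/* runtime/deadline/period: SCHED_DEADLINE only */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_FIFO,
		.sched_priority	= 1,
	};

	/* pid 0 means the calling thread; flags must currently be 0 */
	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}
	printf("calling thread is now SCHED_FIFO:1\n");
	return 0;
}
```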
diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h
index fa08e374870a..5571afd3b549 100644
--- a/tools/tracing/rtla/src/utils.h
+++ b/tools/tracing/rtla/src/utils.h
@@ -6,6 +6,7 @@
* '18446744073709551615\0'
*/
#define BUFF_U64_STR_SIZE 24
+#define MAX_PATH 1024
#define container_of(ptr, type, member)({ \
const typeof(((type *)0)->member) *__mptr = (ptr); \
@@ -53,5 +54,5 @@ struct sched_attr {
};
int parse_prio(char *arg, struct sched_attr *sched_param);
-int set_comm_sched_attr(const char *comm, struct sched_attr *attr);
+int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr);
int set_cpu_dma_latency(int32_t latency);
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c
index 69c3ead25313..cbaa1b9fdeac 100644
--- a/tools/usb/testusb.c
+++ b/tools/usb/testusb.c
@@ -96,7 +96,10 @@ struct usb_interface_descriptor {
enum usb_device_speed {
USB_SPEED_UNKNOWN = 0, /* enumerating */
USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
- USB_SPEED_HIGH /* usb 2.0 */
+ USB_SPEED_HIGH, /* usb 2.0 */
+ USB_SPEED_WIRELESS, /* wireless (usb 2.5) */
+ USB_SPEED_SUPER, /* usb 3.0 */
+ USB_SPEED_SUPER_PLUS, /* usb 3.1 */
};
/*-------------------------------------------------------------------------*/
@@ -104,11 +107,14 @@ enum usb_device_speed {
static char *speed (enum usb_device_speed s)
{
switch (s) {
- case USB_SPEED_UNKNOWN: return "unknown";
- case USB_SPEED_LOW: return "low";
- case USB_SPEED_FULL: return "full";
- case USB_SPEED_HIGH: return "high";
- default: return "??";
+ case USB_SPEED_UNKNOWN: return "unknown";
+ case USB_SPEED_LOW: return "low";
+ case USB_SPEED_FULL: return "full";
+ case USB_SPEED_HIGH: return "high";
+ case USB_SPEED_WIRELESS: return "wireless";
+ case USB_SPEED_SUPER: return "super";
+ case USB_SPEED_SUPER_PLUS: return "super-plus";
+ default: return "??";
}
}
@@ -482,7 +488,7 @@ usage:
}
if (not)
return 0;
- if (testdevs && testdevs->next == 0 && !device)
+ if (testdevs && !testdevs->next && !device)
device = testdevs->name;
for (entry = testdevs; entry; entry = entry->next) {
int status;
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index b1ed76d9a979..381dcc00cb62 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -80,9 +80,10 @@
#define KPF_SOFTDIRTY 40
#define KPF_ARCH_2 41
-/* [48-] take some arbitrary free slots for expanding overloaded flags
+/* [47-] take some arbitrary free slots for expanding overloaded flags
* not part of kernel API
*/
+#define KPF_ANON_EXCLUSIVE 47
#define KPF_READAHEAD 48
#define KPF_SLOB_FREE 49
#define KPF_SLUB_FROZEN 50
@@ -138,6 +139,7 @@ static const char * const page_flag_names[] = {
[KPF_SOFTDIRTY] = "f:softdirty",
[KPF_ARCH_2] = "H:arch_2",
+ [KPF_ANON_EXCLUSIVE] = "d:anon_exclusive",
[KPF_READAHEAD] = "I:readahead",
[KPF_SLOB_FREE] = "P:slob_free",
[KPF_SLUB_FROZEN] = "A:slub_frozen",
@@ -472,6 +474,10 @@ static int bit_mask_ok(uint64_t flags)
static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme)
{
+ /* Anonymous pages overload PG_mappedtodisk */
+ if ((flags & BIT(ANON)) && (flags & BIT(MAPPEDTODISK)))
+ flags ^= BIT(MAPPEDTODISK) | BIT(ANON_EXCLUSIVE);
+
/* SLOB/SLUB overload several page flags */
if (flags & BIT(SLAB)) {
if (flags & BIT(PRIVATE))
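
The expand_overloaded_flags() hunk above uses a compact XOR remap: inside the
branch, MAPPEDTODISK is known to be set and ANON_EXCLUSIVE known to be clear,
so XOR-ing with both masks clears the first bit and sets the second in one
operation. A toy demonstration (bit positions illustrative, not the real
KPF_* values):

```c
/* Toy demo of the XOR remap used in expand_overloaded_flags() above:
 * with bit A known set and bit B known clear, flags ^= BIT(A)|BIT(B)
 * clears A and sets B in a single operation. */
#include <stdio.h>
#include <stdint.h>

#define BIT(nr)	(1ULL << (nr))

enum { MAPPEDTODISK = 3, ANON = 4, ANON_EXCLUSIVE = 5 }; /* illustrative */

int main(void)
{
	uint64_t flags = BIT(ANON) | BIT(MAPPEDTODISK);

	if ((flags & BIT(ANON)) && (flags & BIT(MAPPEDTODISK)))
		flags ^= BIT(MAPPEDTODISK) | BIT(ANON_EXCLUSIVE);

	printf("mappedtodisk=%d anon_exclusive=%d\n",
	       !!(flags & BIT(MAPPEDTODISK)),
	       !!(flags & BIT(ANON_EXCLUSIVE)));	/* prints 0 and 1 */
	return 0;
}
```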
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 7d98e76c2291..c149427eb1c9 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -39,6 +39,7 @@ struct block_list {
int page_num;
pid_t pid;
pid_t tgid;
+ int allocator;
};
enum FILTER_BIT {
FILTER_UNRELEASE = 1<<1,
@@ -51,14 +52,39 @@ enum CULL_BIT {
CULL_PID = 1<<2,
CULL_TGID = 1<<3,
CULL_COMM = 1<<4,
- CULL_STACKTRACE = 1<<5
+ CULL_STACKTRACE = 1<<5,
+ CULL_ALLOCATOR = 1<<6
+};
+enum ALLOCATOR_BIT {
+ ALLOCATOR_CMA = 1<<1,
+ ALLOCATOR_SLAB = 1<<2,
+ ALLOCATOR_VMALLOC = 1<<3,
+ ALLOCATOR_OTHERS = 1<<4
+};
+enum ARG_TYPE {
+ ARG_TXT, ARG_COMM, ARG_STACKTRACE, ARG_ALLOC_TS, ARG_FREE_TS,
+ ARG_CULL_TIME, ARG_PAGE_NUM, ARG_PID, ARG_TGID, ARG_UNKNOWN, ARG_FREE,
+ ARG_ALLOCATOR
+};
+enum SORT_ORDER {
+ SORT_ASC = 1,
+ SORT_DESC = -1,
};
struct filter_condition {
- pid_t tgid;
- pid_t pid;
- char comm[TASK_COMM_LEN];
+ pid_t *pids;
+ pid_t *tgids;
+ char **comms;
+ int pids_size;
+ int tgids_size;
+ int comms_size;
+};
+struct sort_condition {
+ int (**cmps)(const void *, const void *);
+ int *signs;
+ int size;
};
static struct filter_condition fc;
+static struct sort_condition sc;
static regex_t order_pattern;
static regex_t pid_pattern;
static regex_t tgid_pattern;
@@ -70,16 +96,22 @@ static int list_size;
static int max_size;
static int cull;
static int filter;
+static bool debug_on;
-int read_block(char *buf, int buf_size, FILE *fin)
+static void set_single_cmp(int (*cmp)(const void *, const void *), int sign);
+
+int read_block(char *buf, char *ext_buf, int buf_size, FILE *fin)
{
char *curr = buf, *const buf_end = buf + buf_size;
while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) {
- if (*curr == '\n') /* empty line */
+ if (*curr == '\n') { /* empty line */
return curr - buf;
- if (!strncmp(curr, "PFN", 3))
+ }
+ if (!strncmp(curr, "PFN", 3)) {
+ strcpy(ext_buf, curr);
continue;
+ }
curr += strlen(curr);
}
@@ -104,14 +136,14 @@ static int compare_num(const void *p1, const void *p2)
{
const struct block_list *l1 = p1, *l2 = p2;
- return l2->num - l1->num;
+ return l1->num - l2->num;
}
static int compare_page_num(const void *p1, const void *p2)
{
const struct block_list *l1 = p1, *l2 = p2;
- return l2->page_num - l1->page_num;
+ return l1->page_num - l2->page_num;
}
static int compare_pid(const void *p1, const void *p2)
@@ -128,6 +160,13 @@ static int compare_tgid(const void *p1, const void *p2)
return l1->tgid - l2->tgid;
}
+static int compare_allocator(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->allocator - l2->allocator;
+}
+
static int compare_comm(const void *p1, const void *p2)
{
const struct block_list *l1 = p1, *l2 = p2;
@@ -149,7 +188,6 @@ static int compare_free_ts(const void *p1, const void *p2)
return l1->free_ts_nsec < l2->free_ts_nsec ? -1 : 1;
}
-
static int compare_release(const void *p1, const void *p2)
{
const struct block_list *l1 = p1, *l2 = p2;
@@ -161,7 +199,6 @@ static int compare_release(const void *p1, const void *p2)
return l1->free_ts_nsec ? 1 : -1;
}
-
static int compare_cull_condition(const void *p1, const void *p2)
{
if (cull == 0)
@@ -176,9 +213,21 @@ static int compare_cull_condition(const void *p1, const void *p2)
return compare_comm(p1, p2);
if ((cull & CULL_UNRELEASE) && compare_release(p1, p2))
return compare_release(p1, p2);
+ if ((cull & CULL_ALLOCATOR) && compare_allocator(p1, p2))
+ return compare_allocator(p1, p2);
return 0;
}
+static int compare_sort_condition(const void *p1, const void *p2)
+{
+ int cmp = 0;
+
+ for (int i = 0; i < sc.size; ++i)
+ if (cmp == 0)
+ cmp = sc.signs[i] * sc.cmps[i](p1, p2);
+ return cmp;
+}
+
static int search_pattern(regex_t *pattern, char *pattern_str, char *buf)
{
int err, val_len;
@@ -186,7 +235,8 @@ static int search_pattern(regex_t *pattern, char *pattern_str, char *buf)
err = regexec(pattern, buf, 2, pmatch, REG_NOTBOL);
if (err != 0 || pmatch[1].rm_so == -1) {
- printf("no matching pattern in %s\n", buf);
+ if (debug_on)
+ fprintf(stderr, "no matching pattern in %s\n", buf);
return -1;
}
val_len = pmatch[1].rm_eo - pmatch[1].rm_so;
@@ -202,7 +252,7 @@ static void check_regcomp(regex_t *pattern, const char *regex)
err = regcomp(pattern, regex, REG_EXTENDED | REG_NEWLINE);
if (err != 0 || pattern->re_nsub != 1) {
- printf("Invalid pattern %s code %d\n", regex, err);
+ fprintf(stderr, "Invalid pattern %s code %d\n", regex, err);
exit(1);
}
}
@@ -251,7 +301,8 @@ static int get_page_num(char *buf)
errno = 0;
order_val = strtol(order_str, &endptr, 10);
if (order_val > 64 || errno != 0 || endptr == order_str || *endptr != '\0') {
- printf("wrong order in follow buf:\n%s\n", buf);
+ if (debug_on)
+ fprintf(stderr, "wrong order in follow buf:\n%s\n", buf);
return 0;
}
@@ -268,7 +319,8 @@ static pid_t get_pid(char *buf)
errno = 0;
pid = strtol(pid_str, &endptr, 10);
if (errno != 0 || endptr == pid_str || *endptr != '\0') {
- printf("wrong/invalid pid in follow buf:\n%s\n", buf);
+ if (debug_on)
+ fprintf(stderr, "wrong/invalid pid in follow buf:\n%s\n", buf);
return -1;
}
@@ -286,7 +338,8 @@ static pid_t get_tgid(char *buf)
errno = 0;
tgid = strtol(tgid_str, &endptr, 10);
if (errno != 0 || endptr == tgid_str || *endptr != '\0') {
- printf("wrong/invalid tgid in follow buf:\n%s\n", buf);
+ if (debug_on)
+ fprintf(stderr, "wrong/invalid tgid in follow buf:\n%s\n", buf);
return -1;
}
@@ -304,7 +357,8 @@ static __u64 get_ts_nsec(char *buf)
errno = 0;
ts_nsec = strtoull(ts_nsec_str, &endptr, 10);
if (errno != 0 || endptr == ts_nsec_str || *endptr != '\0') {
- printf("wrong ts_nsec in follow buf:\n%s\n", buf);
+ if (debug_on)
+ fprintf(stderr, "wrong ts_nsec in follow buf:\n%s\n", buf);
return -1;
}
@@ -321,7 +375,8 @@ static __u64 get_free_ts_nsec(char *buf)
errno = 0;
free_ts_nsec = strtoull(free_ts_nsec_str, &endptr, 10);
if (errno != 0 || endptr == free_ts_nsec_str || *endptr != '\0') {
- printf("wrong free_ts_nsec in follow buf:\n%s\n", buf);
+ if (debug_on)
+ fprintf(stderr, "wrong free_ts_nsec in follow buf:\n%s\n", buf);
return -1;
}
@@ -337,33 +392,104 @@ static char *get_comm(char *buf)
search_pattern(&comm_pattern, comm_str, buf);
errno = 0;
if (errno != 0) {
- printf("wrong comm in follow buf:\n%s\n", buf);
+ if (debug_on)
+ fprintf(stderr, "wrong comm in follow buf:\n%s\n", buf);
return NULL;
}
return comm_str;
}
+static int get_arg_type(const char *arg)
+{
+ if (!strcmp(arg, "pid") || !strcmp(arg, "p"))
+ return ARG_PID;
+ else if (!strcmp(arg, "tgid") || !strcmp(arg, "tg"))
+ return ARG_TGID;
+ else if (!strcmp(arg, "name") || !strcmp(arg, "n"))
+ return ARG_COMM;
+ else if (!strcmp(arg, "stacktrace") || !strcmp(arg, "st"))
+ return ARG_STACKTRACE;
+ else if (!strcmp(arg, "free") || !strcmp(arg, "f"))
+ return ARG_FREE;
+ else if (!strcmp(arg, "txt") || !strcmp(arg, "T"))
+ return ARG_TXT;
+ else if (!strcmp(arg, "free_ts") || !strcmp(arg, "ft"))
+ return ARG_FREE_TS;
+ else if (!strcmp(arg, "alloc_ts") || !strcmp(arg, "at"))
+ return ARG_ALLOC_TS;
+ else if (!strcmp(arg, "allocator") || !strcmp(arg, "ator"))
+ return ARG_ALLOCATOR;
+ else {
+ return ARG_UNKNOWN;
+ }
+}
+
+static int get_allocator(const char *buf, const char *migrate_info)
+{
+ char *tmp, *first_line, *second_line;
+ int allocator = 0;
+
+ if (strstr(migrate_info, "CMA"))
+ allocator |= ALLOCATOR_CMA;
+ if (strstr(migrate_info, "slab"))
+ allocator |= ALLOCATOR_SLAB;
+ tmp = strstr(buf, "__vmalloc_node_range");
+ if (tmp) {
+ second_line = tmp;
+ while (*tmp != '\n')
+ tmp--;
+ tmp--;
+ while (*tmp != '\n')
+ tmp--;
+ first_line = ++tmp;
+ tmp = strstr(tmp, "alloc_pages");
+ if (tmp && first_line <= tmp && tmp < second_line)
+ allocator |= ALLOCATOR_VMALLOC;
+ }
+ if (allocator == 0)
+ allocator = ALLOCATOR_OTHERS;
+ return allocator;
+}
+
+static bool match_num_list(int num, int *list, int list_size)
+{
+ for (int i = 0; i < list_size; ++i)
+ if (list[i] == num)
+ return true;
+ return false;
+}
+
+static bool match_str_list(const char *str, char **list, int list_size)
+{
+ for (int i = 0; i < list_size; ++i)
+ if (!strcmp(list[i], str))
+ return true;
+ return false;
+}
+
static bool is_need(char *buf)
{
if ((filter & FILTER_UNRELEASE) && get_free_ts_nsec(buf) != 0)
return false;
- if ((filter & FILTER_PID) && get_pid(buf) != fc.pid)
+ if ((filter & FILTER_PID) && !match_num_list(get_pid(buf), fc.pids, fc.pids_size))
return false;
- if ((filter & FILTER_TGID) && get_tgid(buf) != fc.tgid)
+ if ((filter & FILTER_TGID) &&
+ !match_num_list(get_tgid(buf), fc.tgids, fc.tgids_size))
return false;
char *comm = get_comm(buf);
if ((filter & FILTER_COMM) &&
- strncmp(comm, fc.comm, TASK_COMM_LEN) != 0) {
+ !match_str_list(comm, fc.comms, fc.comms_size)) {
free(comm);
return false;
}
+ free(comm);
return true;
}
-static void add_list(char *buf, int len)
+static void add_list(char *buf, int len, char *ext_buf)
{
if (list_size != 0 &&
len == list[list_size-1].len &&
@@ -373,7 +499,7 @@ static void add_list(char *buf, int len)
return;
}
if (list_size == max_size) {
- printf("max_size too small??\n");
+ fprintf(stderr, "max_size too small??\n");
exit(1);
}
if (!is_need(buf))
@@ -383,7 +509,7 @@ static void add_list(char *buf, int len)
list[list_size].comm = get_comm(buf);
list[list_size].txt = malloc(len+1);
if (!list[list_size].txt) {
- printf("Out of memory\n");
+ fprintf(stderr, "Out of memory\n");
exit(1);
}
memcpy(list[list_size].txt, buf, len);
@@ -397,6 +523,7 @@ static void add_list(char *buf, int len)
list[list_size].stacktrace++;
list[list_size].ts_nsec = get_ts_nsec(buf);
list[list_size].free_ts_nsec = get_free_ts_nsec(buf);
+ list[list_size].allocator = get_allocator(buf, ext_buf);
list_size++;
if (list_size % 1000 == 0) {
printf("loaded %d\r", list_size);
@@ -409,25 +536,130 @@ static bool parse_cull_args(const char *arg_str)
int size = 0;
char **args = explode(',', arg_str, &size);
- for (int i = 0; i < size; ++i)
- if (!strcmp(args[i], "pid") || !strcmp(args[i], "p"))
+ for (int i = 0; i < size; ++i) {
+ int arg_type = get_arg_type(args[i]);
+
+ if (arg_type == ARG_PID)
cull |= CULL_PID;
- else if (!strcmp(args[i], "tgid") || !strcmp(args[i], "tg"))
+ else if (arg_type == ARG_TGID)
cull |= CULL_TGID;
- else if (!strcmp(args[i], "name") || !strcmp(args[i], "n"))
+ else if (arg_type == ARG_COMM)
cull |= CULL_COMM;
- else if (!strcmp(args[i], "stacktrace") || !strcmp(args[i], "st"))
+ else if (arg_type == ARG_STACKTRACE)
cull |= CULL_STACKTRACE;
- else if (!strcmp(args[i], "free") || !strcmp(args[i], "f"))
+ else if (arg_type == ARG_FREE)
cull |= CULL_UNRELEASE;
+ else if (arg_type == ARG_ALLOCATOR)
+ cull |= CULL_ALLOCATOR;
else {
free_explode(args, size);
return false;
}
+ }
free_explode(args, size);
+ if (sc.size == 0)
+ set_single_cmp(compare_num, SORT_DESC);
return true;
}
+static void set_single_cmp(int (*cmp)(const void *, const void *), int sign)
+{
+ if (sc.signs == NULL || sc.size < 1)
+ sc.signs = calloc(1, sizeof(int));
+ sc.signs[0] = sign;
+ if (sc.cmps == NULL || sc.size < 1)
+ sc.cmps = calloc(1, sizeof(int *));
+ sc.cmps[0] = cmp;
+ sc.size = 1;
+}
+
+static bool parse_sort_args(const char *arg_str)
+{
+ int size = 0;
+
+ if (sc.size != 0) { /* reset sort_condition */
+ free(sc.signs);
+ free(sc.cmps);
+ size = 0;
+ }
+
+ char **args = explode(',', arg_str, &size);
+
+ sc.signs = calloc(size, sizeof(int));
+ sc.cmps = calloc(size, sizeof(int *));
+ for (int i = 0; i < size; ++i) {
+ int offset = 0;
+
+ sc.signs[i] = SORT_ASC;
+ if (args[i][0] == '-' || args[i][0] == '+') {
+ if (args[i][0] == '-')
+ sc.signs[i] = SORT_DESC;
+ offset = 1;
+ }
+
+ int arg_type = get_arg_type(args[i]+offset);
+
+ if (arg_type == ARG_PID)
+ sc.cmps[i] = compare_pid;
+ else if (arg_type == ARG_TGID)
+ sc.cmps[i] = compare_tgid;
+ else if (arg_type == ARG_COMM)
+ sc.cmps[i] = compare_comm;
+ else if (arg_type == ARG_STACKTRACE)
+ sc.cmps[i] = compare_stacktrace;
+ else if (arg_type == ARG_ALLOC_TS)
+ sc.cmps[i] = compare_ts;
+ else if (arg_type == ARG_FREE_TS)
+ sc.cmps[i] = compare_free_ts;
+ else if (arg_type == ARG_TXT)
+ sc.cmps[i] = compare_txt;
+ else if (arg_type == ARG_ALLOCATOR)
+ sc.cmps[i] = compare_allocator;
+ else {
+ free_explode(args, size);
+ sc.size = 0;
+ return false;
+ }
+ }
+ sc.size = size;
+ free_explode(args, size);
+ return true;
+}
+
+static int *parse_nums_list(char *arg_str, int *list_size)
+{
+ int size = 0;
+ char **args = explode(',', arg_str, &size);
+ int *list = calloc(size, sizeof(int));
+
+ errno = 0;
+ for (int i = 0; i < size; ++i) {
+ char *endptr = NULL;
+
+ list[i] = strtol(args[i], &endptr, 10);
+ if (errno != 0 || endptr == args[i] || *endptr != '\0') {
+ free(list);
+ return NULL;
+ }
+ }
+ *list_size = size;
+ free_explode(args, size);
+ return list;
+}
+
+static void print_allocator(FILE *out, int allocator)
+{
+ fprintf(out, "allocated by ");
+ if (allocator & ALLOCATOR_CMA)
+ fprintf(out, "CMA ");
+ if (allocator & ALLOCATOR_SLAB)
+ fprintf(out, "SLAB ");
+ if (allocator & ALLOCATOR_VMALLOC)
+ fprintf(out, "VMALLOC ");
+ if (allocator & ALLOCATOR_OTHERS)
+ fprintf(out, "OTHERS ");
+}
+
#define BUF_SIZE (128 * 1024)
static void usage(void)
@@ -442,19 +674,20 @@ static void usage(void)
"-a\t\tSort by memory allocate time.\n"
"-r\t\tSort by memory release time.\n"
"-f\t\tFilter out the information of blocks whose memory has been released.\n"
- "--pid <PID>\tSelect by pid. This selects the information of blocks whose process ID number equals to <PID>.\n"
- "--tgid <TGID>\tSelect by tgid. This selects the information of blocks whose Thread Group ID number equals to <TGID>.\n"
- "--name <command>\n\t\tSelect by command name. This selects the information of blocks whose command name identical to <command>.\n"
- "--cull <rules>\tCull by user-defined rules. <rules> is a single argument in the form of a comma-separated list with some common fields predefined\n"
+ "-d\t\tPrint debug information.\n"
+ "--pid <pidlist>\tSelect by pid. This selects the information of blocks whose process ID numbers appear in <pidlist>.\n"
+ "--tgid <tgidlist>\tSelect by tgid. This selects the information of blocks whose Thread Group ID numbers appear in <tgidlist>.\n"
+ "--name <cmdlist>\n\t\tSelect by command name. This selects the information of blocks whose command name appears in <cmdlist>.\n"
+ "--cull <rules>\tCull by user-defined rules.<rules> is a single argument in the form of a comma-separated list with some common fields predefined\n"
+ "--sort <order>\tSpecify sort order as: [+|-]key[,[+|-]key[,...]]\n"
);
}
int main(int argc, char **argv)
{
- int (*cmp)(const void *, const void *) = compare_num;
FILE *fin, *fout;
- char *buf, *endptr;
- int ret, i, count;
+ char *buf, *ext_buf;
+ int i, count;
struct stat st;
int opt;
struct option longopts[] = {
@@ -462,64 +695,74 @@ int main(int argc, char **argv)
{ "tgid", required_argument, NULL, 2 },
{ "name", required_argument, NULL, 3 },
{ "cull", required_argument, NULL, 4 },
+ { "sort", required_argument, NULL, 5 },
{ 0, 0, 0, 0},
};
- while ((opt = getopt_long(argc, argv, "afmnprstP", longopts, NULL)) != -1)
+ while ((opt = getopt_long(argc, argv, "adfmnprstP", longopts, NULL)) != -1)
switch (opt) {
case 'a':
- cmp = compare_ts;
+ set_single_cmp(compare_ts, SORT_ASC);
+ break;
+ case 'd':
+ debug_on = true;
break;
case 'f':
filter = filter | FILTER_UNRELEASE;
break;
case 'm':
- cmp = compare_page_num;
+ set_single_cmp(compare_page_num, SORT_DESC);
break;
case 'p':
- cmp = compare_pid;
+ set_single_cmp(compare_pid, SORT_ASC);
break;
case 'r':
- cmp = compare_free_ts;
+ set_single_cmp(compare_free_ts, SORT_ASC);
break;
case 's':
- cmp = compare_stacktrace;
+ set_single_cmp(compare_stacktrace, SORT_ASC);
break;
case 't':
- cmp = compare_num;
+ set_single_cmp(compare_num, SORT_DESC);
break;
case 'P':
- cmp = compare_tgid;
+ set_single_cmp(compare_tgid, SORT_ASC);
break;
case 'n':
- cmp = compare_comm;
+ set_single_cmp(compare_comm, SORT_ASC);
break;
case 1:
filter = filter | FILTER_PID;
- errno = 0;
- fc.pid = strtol(optarg, &endptr, 10);
- if (errno != 0 || endptr == optarg || *endptr != '\0') {
- printf("wrong/invalid pid in from the command line:%s\n", optarg);
+ fc.pids = parse_nums_list(optarg, &fc.pids_size);
+ if (fc.pids == NULL) {
+ fprintf(stderr, "wrong/invalid pid in from the command line:%s\n",
+ optarg);
exit(1);
}
break;
case 2:
filter = filter | FILTER_TGID;
- errno = 0;
- fc.tgid = strtol(optarg, &endptr, 10);
- if (errno != 0 || endptr == optarg || *endptr != '\0') {
- printf("wrong/invalid tgid in from the command line:%s\n", optarg);
+ fc.tgids = parse_nums_list(optarg, &fc.tgids_size);
+ if (fc.tgids == NULL) {
+ fprintf(stderr, "wrong/invalid tgid in from the command line:%s\n",
+ optarg);
exit(1);
}
break;
case 3:
filter = filter | FILTER_COMM;
- strncpy(fc.comm, optarg, TASK_COMM_LEN);
- fc.comm[TASK_COMM_LEN-1] = '\0';
+ fc.comms = explode(',', optarg, &fc.comms_size);
break;
case 4:
if (!parse_cull_args(optarg)) {
- printf("wrong argument after --cull in from the command line:%s\n",
+ fprintf(stderr, "wrong argument after --cull option:%s\n",
+ optarg);
+ exit(1);
+ }
+ break;
+ case 5:
+ if (!parse_sort_args(optarg)) {
+ fprintf(stderr, "wrong argument after --sort option:%s\n",
optarg);
exit(1);
}
@@ -553,17 +796,18 @@ int main(int argc, char **argv)
list = malloc(max_size * sizeof(*list));
buf = malloc(BUF_SIZE);
- if (!list || !buf) {
- printf("Out of memory\n");
+ ext_buf = malloc(BUF_SIZE);
+ if (!list || !buf || !ext_buf) {
+ fprintf(stderr, "Out of memory\n");
exit(1);
}
for ( ; ; ) {
- ret = read_block(buf, BUF_SIZE, fin);
- if (ret < 0)
- break;
+ int buf_len = read_block(buf, ext_buf, BUF_SIZE, fin);
- add_list(buf, ret);
+ if (buf_len < 0)
+ break;
+ add_list(buf, buf_len, ext_buf);
}
printf("loaded %d\n", list_size);
@@ -584,12 +828,14 @@ int main(int argc, char **argv)
}
}
- qsort(list, count, sizeof(list[0]), cmp);
+ qsort(list, count, sizeof(list[0]), compare_sort_condition);
for (i = 0; i < count; i++) {
- if (cull == 0)
- fprintf(fout, "%d times, %d pages:\n%s\n",
- list[i].num, list[i].page_num, list[i].txt);
+ if (cull == 0) {
+ fprintf(fout, "%d times, %d pages, ", list[i].num, list[i].page_num);
+ print_allocator(fout, list[i].allocator);
+ fprintf(fout, ":\n%s\n", list[i].txt);
+ }
else {
fprintf(fout, "%d times, %d pages",
list[i].num, list[i].page_num);
@@ -599,6 +845,10 @@ int main(int argc, char **argv)
fprintf(fout, ", TGID %d", list[i].pid);
if (cull & CULL_COMM || filter & FILTER_COMM)
fprintf(fout, ", task_comm_name: %s", list[i].comm);
+ if (cull & CULL_ALLOCATOR) {
+ fprintf(fout, ", ");
+ print_allocator(fout, list[i].allocator);
+ }
if (cull & CULL_UNRELEASE)
fprintf(fout, " (%s)",
list[i].free_ts_nsec ? "UNRELEASED" : "RELEASED");
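
The new --sort machinery above (parse_sort_args(), set_single_cmp(),
compare_sort_condition()) is a classic multi-key sort: an ordered array of
comparators, each paired with a +1/-1 sign for ascending/descending, tried in
turn until one differs. A standalone sketch of the same technique with
illustrative record fields:

```c
/* Sketch of the multi-key sort behind --sort above: comparators are
 * tried in order and each result is multiplied by +1 (ascending) or
 * -1 (descending). Names and data are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct rec { int pid; int pages; };

static int cmp_pid(const void *a, const void *b)
{
	return ((const struct rec *)a)->pid - ((const struct rec *)b)->pid;
}

static int cmp_pages(const void *a, const void *b)
{
	return ((const struct rec *)a)->pages - ((const struct rec *)b)->pages;
}

/* e.g. "--sort=-pages,+pid": pages descending, then pid ascending */
static int (*cmps[])(const void *, const void *) = { cmp_pages, cmp_pid };
static int signs[] = { -1, +1 };

static int cmp_multi(const void *a, const void *b)
{
	for (int i = 0; i < 2; i++) {
		int c = signs[i] * cmps[i](a, b);

		if (c)
			return c;
	}
	return 0;
}

int main(void)
{
	struct rec v[] = { {3, 8}, {1, 8}, {2, 16} };

	qsort(v, 3, sizeof(v[0]), cmp_multi);
	for (int i = 0; i < 3; i++)
		printf("pid %d, %d pages\n", v[i].pid, v[i].pages);
	/* prints: pid 2 (16 pages), then pid 1 and pid 3 (8 pages each) */
	return 0;
}
```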
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 9b68658b6bb8..5b98f3ee58a5 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -233,6 +233,24 @@ static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
return l;
}
+static unsigned long read_debug_slab_obj(struct slabinfo *s, const char *name)
+{
+ char x[128];
+ FILE *f;
+ size_t l;
+
+ snprintf(x, 128, "/sys/kernel/debug/slab/%s/%s", s->name, name);
+ f = fopen(x, "r");
+ if (!f) {
+ buffer[0] = 0;
+ l = 0;
+ } else {
+ l = fread(buffer, 1, sizeof(buffer), f);
+ buffer[l] = 0;
+ fclose(f);
+ }
+ return l;
+}
/*
* Put a size string together
@@ -409,14 +427,18 @@ static void show_tracking(struct slabinfo *s)
{
printf("\n%s: Kernel object allocation\n", s->name);
printf("-----------------------------------------------------------------------\n");
- if (read_slab_obj(s, "alloc_calls"))
+ if (read_debug_slab_obj(s, "alloc_traces"))
+ printf("%s", buffer);
+ else if (read_slab_obj(s, "alloc_calls"))
printf("%s", buffer);
else
printf("No Data\n");
printf("\n%s: Kernel object freeing\n", s->name);
printf("------------------------------------------------------------------------\n");
- if (read_slab_obj(s, "free_calls"))
+ if (read_debug_slab_obj(s, "free_traces"))
+ printf("%s", buffer);
+ else if (read_slab_obj(s, "free_calls"))
printf("%s", buffer);
else
printf("No Data\n");